bool SFB::Audio::Converter::Open(CFErrorRef *error) { if(!mDecoder) return false; // Open the decoder if necessary if(!mDecoder->IsOpen() && !mDecoder->Open(error)) { if(error) LOGGER_ERR("org.sbooth.AudioEngine.AudioConverter", "Error opening decoder: " << error); return false; } AudioStreamBasicDescription inputFormat = mDecoder->GetFormat(); OSStatus result = AudioConverterNew(&inputFormat, &mFormat, &mConverter); if(noErr != result) { LOGGER_ERR("org.sbooth.AudioEngine.AudioConverter", "AudioConverterNewfailed: " << result << "'" << SFB::StringForOSType((OSType)result) << "'"); if(error) *error = CFErrorCreate(kCFAllocatorDefault, kCFErrorDomainOSStatus, result, nullptr); return false; } // TODO: Set kAudioConverterPropertyCalculateInputBufferSize mConverterState = std::unique_ptr<ConverterStateData>(new ConverterStateData(*mDecoder)); mConverterState->AllocateBufferList(BUFFER_SIZE_FRAMES); // Create the channel map if(mChannelLayout) { SInt32 channelMap [ mFormat.mChannelsPerFrame ]; UInt32 dataSize = (UInt32)sizeof(channelMap); const AudioChannelLayout *channelLayouts [] = { mDecoder->GetChannelLayout(), mChannelLayout }; result = AudioFormatGetProperty(kAudioFormatProperty_ChannelMap, sizeof(channelLayouts), channelLayouts, &dataSize, channelMap); if(noErr != result) { LOGGER_ERR("org.sbooth.AudioEngine.AudioConverter", "AudioFormatGetProperty (kAudioFormatProperty_ChannelMap) failed: " << result); if(error) *error = CFErrorCreate(kCFAllocatorDefault, kCFErrorDomainOSStatus, result, nullptr); return false; } } mIsOpen = true; return true; }
// Destroys the underlying Mach semaphore; a failure is logged but otherwise ignored
// (destructors must not throw).
Semaphore::~Semaphore()
{
	auto status = semaphore_destroy(mach_task_self(), mSemaphore);
	if(status != KERN_SUCCESS)
		LOGGER_ERR("org.sbooth.AudioEngine.Semaphore", "semaphore_destroy failed: " << mach_error_string(status));
}
// Tears down the pthread condition variable; destruction failures are logged
// and ignored (destructors must not throw).
Guard::~Guard()
{
	const int err = pthread_cond_destroy(&mCondition);
	if(err != 0)
		LOGGER_ERR("org.sbooth.AudioEngine.Guard", "pthread_cond_destroy failed: " << strerror(err));
}
// Callback for CFReadStream events on the HTTP stream.
// Updates connection/EOS state and captures the response headers; it does not
// itself consume stream bytes (reading is done elsewhere).
void HTTPInputSource::HandleNetworkEvent(CFReadStreamRef stream, CFStreamEventType type)
{
	switch(type) {
		// Connection established; the requested starting offset is now in effect
		case kCFStreamEventOpenCompleted:
			mOffset = mDesiredOffset;
			break;

		// Data arrived: capture the HTTP response headers once, the first time
		// bytes become available (they are not reliably present before then)
		case kCFStreamEventHasBytesAvailable:
			if(nullptr == mResponseHeaders) {
				// CFReadStreamCopyProperty follows the Copy rule, so the
				// returned object must be released here
				CFTypeRef responseHeader = CFReadStreamCopyProperty(stream, kCFStreamPropertyHTTPResponseHeader);
				if(responseHeader) {
					mResponseHeaders = CFHTTPMessageCopyAllHeaderFields(static_cast<CFHTTPMessageRef>(const_cast<void *>(responseHeader)));
					CFRelease(responseHeader), responseHeader = nullptr;
				}
			}
			break;

		// Stream error: log it; the copied CFError is owned here and released immediately
		case kCFStreamEventErrorOccurred:
		{
			CFErrorRef error = CFReadStreamCopyError(stream);
			if(error) {
				LOGGER_ERR("org.sbooth.AudioEngine.InputSource.HTTP", "Error: " << error);
				CFRelease(error), error = nullptr;
			}
			break;
		}

		// End of the HTTP body
		case kCFStreamEventEndEncountered:
			mEOSReached = true;
			break;
	}
}
// Seeks the Monkey's Audio decompressor to the given frame.
// Returns the new current frame, or -1 if the seek failed.
SInt64 SFB::Audio::MonkeysAudioDecoder::_SeekToFrame(SInt64 frame)
{
	const auto seekResult = mDecompressor->Seek((int)frame);
	if(seekResult != ERROR_SUCCESS) {
		LOGGER_ERR("org.sbooth.AudioEngine.Decoder.MonkeysAudio", "mDecompressor->Seek() failed");
		return -1;
	}

	return this->GetCurrentFrame();
}
// Returns the total number of frames in the file, or -1 if the length
// could not be determined.
SInt64 SFB::Audio::CoreAudioDecoder::_GetTotalFrames() const
{
	SInt64 frameCount = -1;
	UInt32 size = sizeof(frameCount);

	const auto status = ExtAudioFileGetProperty(mExtAudioFile, kExtAudioFileProperty_FileLengthFrames, &size, &frameCount);
	if(status != noErr)
		LOGGER_ERR("org.sbooth.AudioEngine.Decoder.CoreAudio", "ExtAudioFileGetProperty (kExtAudioFileProperty_FileLengthFrames) failed: " << status);

	// On failure frameCount retains its -1 sentinel
	return frameCount;
}
// Reads up to frameCount frames of decoded Monkey's Audio data into the
// first (interleaved) buffer of bufferList.
// Returns the number of frames read, or 0 on a decode error.
UInt32 SFB::Audio::MonkeysAudioDecoder::_ReadAudio(AudioBufferList *bufferList, UInt32 frameCount)
{
	int framesDecoded = 0;
	const auto status = mDecompressor->GetData((char *)bufferList->mBuffers[0].mData, (int)frameCount, &framesDecoded);
	if(status != ERROR_SUCCESS) {
		LOGGER_ERR("org.sbooth.AudioEngine.Decoder.MonkeysAudio", "Monkey's Audio invalid checksum");
		return 0;
	}

	// Reflect the amount of data actually decoded
	bufferList->mBuffers[0].mDataByteSize = (UInt32)framesDecoded * mFormat.mBytesPerFrame;
	bufferList->mBuffers[0].mNumberChannels = mFormat.mChannelsPerFrame;

	return (UInt32)framesDecoded;
}
bool SFB::Audio::Converter::Reset() { if(!IsOpen()) return false; OSStatus result = AudioConverterReset(mConverter); if(noErr != result) { LOGGER_ERR("org.sbooth.AudioEngine.AudioConverter", "AudioConverterReset failed: " << result); return false; } return true; }
// Seeks to the given frame.
// Returns the frame position after seeking, or -1 on error.
SInt64 SFB::Audio::CoreAudioDecoder::_SeekToFrame(SInt64 frame)
{
	const OSStatus status = ExtAudioFileSeek(mExtAudioFile, frame);
	if(status != noErr) {
		LOGGER_ERR("org.sbooth.AudioEngine.Decoder.CoreAudio", "ExtAudioFileSeek failed: " << status);
		return -1;
	}

	// ExtAudioFileTell/Seek are unreliable for some MPEG-4 files,
	// so the position is also tracked manually
	if(mUseM4AWorkarounds)
		mCurrentFrame = frame;

	return _GetCurrentFrame();
}
// Reads up to frameCount frames into bufferList via ExtAudioFile.
// Returns the number of frames actually read, or 0 on error.
UInt32 SFB::Audio::CoreAudioDecoder::_ReadAudio(AudioBufferList *bufferList, UInt32 frameCount)
{
	const OSStatus status = ExtAudioFileRead(mExtAudioFile, &frameCount, bufferList);
	if(status != noErr) {
		LOGGER_ERR("org.sbooth.AudioEngine.Decoder.CoreAudio", "ExtAudioFileRead failed: " << status);
		return 0;
	}

	// ExtAudioFileTell is unreliable for some MPEG-4 files,
	// so the position is also tracked manually
	if(mUseM4AWorkarounds)
		mCurrentFrame += frameCount;

	return frameCount;
}
// Reads up to frameCount frames of decoded True Audio data into bufferList,
// first discarding any frames left over from a coarse (seconds-granularity)
// seek (see _SeekToFrame).
// Returns the number of frames read, or 0 on error or end of stream.
UInt32 SFB::Audio::TrueAudioDecoder::_ReadAudio(AudioBufferList *bufferList, UInt32 frameCount)
{
	if(bufferList->mBuffers[0].mNumberChannels != mFormat.mChannelsPerFrame) {
		LOGGER_WARNING("org.sbooth.AudioEngine.Decoder.TrueAudio", "_ReadAudio() called with invalid parameters");
		return 0;
	}

	// Reset output buffer data size
	for(UInt32 i = 0; i < bufferList->mNumberBuffers; ++i)
		bufferList->mBuffers[i].mDataByteSize = 0;

	UInt32 framesRead = 0;
	bool eos = false;

	try {
		// Discard frames preceding the exact seek target; the discarded data is
		// decoded into the caller's buffer and then overwritten below
		while(mFramesToSkip && !eos) {
			if(mFramesToSkip >= frameCount) {
				framesRead = (UInt32)mDecoder->process_stream((TTAuint8 *)bufferList->mBuffers[0].mData, frameCount);
				mFramesToSkip -= framesRead;
			}
			else {
				framesRead = (UInt32)mDecoder->process_stream((TTAuint8 *)bufferList->mBuffers[0].mData, mFramesToSkip);
				mFramesToSkip = 0;
			}

			if(0 == framesRead)
				eos = true;
		}

		// Decode the frames the caller actually wants
		if(!eos) {
			framesRead = (UInt32)mDecoder->process_stream((TTAuint8 *)bufferList->mBuffers[0].mData, frameCount);
			if(0 == framesRead)
				eos = true;
		}
	}
	// Fixed: catch by reference instead of by value (avoids copying the exception object)
	catch(tta::tta_exception &e) {
		LOGGER_ERR("org.sbooth.AudioEngine.Decoder.TrueAudio", "True Audio decoding error: " << e.code());
		return 0;
	}

	if(eos)
		return 0;

	// Reflect the amount of data actually decoded (interleaved, single buffer)
	bufferList->mBuffers[0].mDataByteSize = (UInt32)(framesRead * mFormat.mBytesPerFrame);
	bufferList->mBuffers[0].mNumberChannels = mFormat.mChannelsPerFrame;

	mCurrentFrame += framesRead;
	return framesRead;
}
// Returns the decoder's current frame position, or -1 on error.
SInt64 SFB::Audio::CoreAudioDecoder::_GetCurrentFrame() const
{
	// ExtAudioFileTell is unreliable for some MPEG-4 files,
	// so use the manually tracked position instead
	if(mUseM4AWorkarounds)
		return mCurrentFrame;

	SInt64 currentFrame = -1;

	// Fixed: the second argument had been garbled by a bad character
	// conversion; it must be the address of currentFrame
	OSStatus result = ExtAudioFileTell(mExtAudioFile, &currentFrame);
	if(noErr != result) {
		LOGGER_ERR("org.sbooth.AudioEngine.Decoder.CoreAudio", "ExtAudioFileTell failed: " << result);
		return -1;
	}

	return currentFrame;
}
// Returns a human-readable description of the source audio format,
// or a null string if the lookup fails.
SFB::CFString SFB::Audio::CoreAudioDecoder::_GetSourceFormatDescription() const
{
	CFStringRef formatName = nullptr;
	UInt32 size = sizeof(formatName);

	const OSStatus status = AudioFormatGetProperty(kAudioFormatProperty_FormatName, sizeof(mSourceFormat), &mSourceFormat, &size, &formatName);
	if(status != noErr)
		LOGGER_ERR("org.sbooth.AudioEngine.Decoder", "AudioFormatGetProperty (kAudioFormatProperty_FormatName) failed: " << status << "'" << SFB::StringForOSType((OSType)status) << "'");

	// On failure formatName is still nullptr; the SFB::CFString wrapper handles that
	return formatName;
}
// Seeks to the given frame.  DUMB can only render forward, so a backwards
// seek requires closing and reopening the decoder, then rendering (and
// discarding) samples up to the target frame.
// Returns the new current frame, or -1 on error.
SInt64 SFB::Audio::MODDecoder::_SeekToFrame(SInt64 frame)
{
	// DUMB cannot seek backwards, so the decoder must be reset
	if(frame < mCurrentFrame) {
		if(!_Close(nullptr) || !mInputSource->SeekToOffset(0) || !_Open(nullptr)) {
			// Fixed log message typo: "reseting" -> "resetting"
			LOGGER_ERR("org.sbooth.AudioEngine.Decoder.MOD", "Error resetting DUMB decoder");
			return -1;
		}
		mCurrentFrame = 0;
	}

	// Render and discard samples up to the target frame
	// (65536 is DUMB's fixed-point unit for the delta parameter)
	long framesToSkip = frame - mCurrentFrame;
	duh_sigrenderer_generate_samples(dsr.get(), 1, 65536.0f / DUMB_SAMPLE_RATE, framesToSkip, nullptr);
	mCurrentFrame += framesToSkip;

	return mCurrentFrame;
}
// Returns a caller-owned CFArray of file extensions Core Audio can decode,
// or nullptr on failure.
CFArrayRef SFB::Audio::CoreAudioDecoder::CreateSupportedFileExtensions()
{
	CFArrayRef extensions = nullptr;
	UInt32 size = sizeof(extensions);

	const OSStatus status = AudioFileGetGlobalInfo(kAudioFileGlobalInfo_AllExtensions, 0, nullptr, &size, &extensions);
	if(status != noErr) {
		LOGGER_ERR("org.sbooth.AudioEngine.Decoder.CoreAudio", "AudioFileGetGlobalInfo (kAudioFileGlobalInfo_AllExtensions) failed: " << status << "'" << SFB::StringForOSType((OSType)status) << "'");
		return nullptr;
	}

	return extensions;
}
// Returns a caller-owned CFArray of MIME types Core Audio can decode,
// or nullptr on failure.
CFArrayRef SFB::Audio::CoreAudioDecoder::CreateSupportedMIMETypes()
{
	CFArrayRef supportedMIMETypes = nullptr;
	UInt32 size = sizeof(supportedMIMETypes);
	OSStatus result = AudioFileGetGlobalInfo(kAudioFileGlobalInfo_AllMIMETypes, 0, nullptr, &size, &supportedMIMETypes);
	if(noErr != result) {
		LOGGER_ERR("org.sbooth.AudioEngine.Decoder.CoreAudio", "AudioFileGetGlobalInfo (kAudioFileGlobalInfo_AllMIMETypes) failed: " << result << "'" << SFB::StringForOSType((OSType)result) << "'");
		return nullptr;
	}

	// AudioFileGetGlobalInfo hands back a retained array (CF Create rule); the
	// previous code copied it and leaked the original.  Release it after copying.
	CFArrayRef mimeTypesCopy = CFArrayCreateCopy(kCFAllocatorDefault, supportedMIMETypes);
	CFRelease(supportedMIMETypes);

	return mimeTypesCopy;
}
// Returns a caller-owned, human-readable name for the decoder's channel
// layout, or nullptr if the decoder is not open or the lookup fails.
CFStringRef SFB::Audio::Decoder::CreateChannelLayoutDescription() const
{
	if(!IsOpen()) {
		LOGGER_INFO("org.sbooth.AudioEngine.Decoder", "CreateChannelLayoutDescription() called on a Decoder that hasn't been opened");
		return nullptr;
	}

	CFStringRef layoutName = nullptr;
	UInt32 size = sizeof(layoutName);

	const OSStatus status = AudioFormatGetProperty(kAudioFormatProperty_ChannelLayoutName, sizeof(mChannelLayout.GetACL()), mChannelLayout.GetACL(), &size, &layoutName);
	if(status != noErr)
		LOGGER_ERR("org.sbooth.AudioEngine.Decoder", "AudioFormatGetProperty (kAudioFormatProperty_ChannelLayoutName) failed: " << status << "'" << SFB::StringForOSType((OSType)status) << "'");

	return layoutName;
}
// Seeks to (approximately) the given frame.  libtta positions only to a
// whole-second granularity, so the residual frames are recorded in
// mFramesToSkip and discarded by the next _ReadAudio call.
// Returns the requested frame, or -1 on error.
SInt64 SFB::Audio::TrueAudioDecoder::_SeekToFrame(SInt64 frame)
{
	TTAuint32 seconds = (TTAuint32)(frame / mSourceFormat.mSampleRate);
	TTAuint32 frame_start = 0;

	try {
		mDecoder->set_position(seconds, &frame_start);
	}
	// Fixed: catch by reference instead of by value (avoids copying the exception object)
	catch(tta::tta_exception &e) {
		LOGGER_ERR("org.sbooth.AudioEngine.Decoder.TrueAudio", "True Audio seek error: " << e.code());
		return -1;
	}

	mCurrentFrame = frame;

	// We need to skip some samples from start of the frame if required
	mFramesToSkip = UInt32((seconds - frame_start) * mSourceFormat.mSampleRate + 0.5);

	return mCurrentFrame;
}
bool SetAPETagFromMetadata(const AudioMetadata& metadata, TagLib::APE::Tag *tag, bool setAlbumArt) { if(nullptr == tag) return false; // Standard tags SetAPETag(tag, "ALBUM", metadata.GetAlbumTitle()); SetAPETag(tag, "ARTIST", metadata.GetArtist()); SetAPETag(tag, "ALBUMARTIST", metadata.GetAlbumArtist()); SetAPETag(tag, "COMPOSER", metadata.GetComposer()); SetAPETag(tag, "GENRE", metadata.GetGenre()); SetAPETag(tag, "DATE", metadata.GetReleaseDate()); SetAPETag(tag, "DESCRIPTION", metadata.GetComment()); SetAPETag(tag, "TITLE", metadata.GetTitle()); SetAPETagNumber(tag, "TRACKNUMBER", metadata.GetTrackNumber()); SetAPETagNumber(tag, "TRACKTOTAL", metadata.GetTrackTotal()); SetAPETagBoolean(tag, "COMPILATION", metadata.GetCompilation()); SetAPETagNumber(tag, "DISCNUMBER", metadata.GetDiscNumber()); SetAPETagNumber(tag, "DISCTOTAL", metadata.GetDiscTotal()); SetAPETagNumber(tag, "BPM", metadata.GetBPM()); SetAPETagNumber(tag, "RATING", metadata.GetRating()); SetAPETag(tag, "ISRC", metadata.GetISRC()); SetAPETag(tag, "MCN", metadata.GetMCN()); SetAPETag(tag, "TITLESORT", metadata.GetTitleSortOrder()); SetAPETag(tag, "ALBUMTITLESORT", metadata.GetAlbumTitleSortOrder()); SetAPETag(tag, "ARTISTSORT", metadata.GetArtistSortOrder()); SetAPETag(tag, "ALBUMARTISTSORT", metadata.GetAlbumArtistSortOrder()); SetAPETag(tag, "COMPOSERSORT", metadata.GetComposerSortOrder()); SetAPETag(tag, "GROUPING", metadata.GetGrouping()); // Additional metadata CFDictionaryRef additionalMetadata = metadata.GetAdditionalMetadata(); if(nullptr != additionalMetadata) { CFIndex count = CFDictionaryGetCount(additionalMetadata); const void * keys [count]; const void * values [count]; CFDictionaryGetKeysAndValues(additionalMetadata, reinterpret_cast<const void **>(keys), reinterpret_cast<const void **>(values)); for(CFIndex i = 0; i < count; ++i) { CFIndex keySize = CFStringGetMaximumSizeForEncoding(CFStringGetLength(reinterpret_cast<CFStringRef>(keys[i])), kCFStringEncodingASCII); char key [keySize 
+ 1]; if(!CFStringGetCString(reinterpret_cast<CFStringRef>(keys[i]), key, keySize + 1, kCFStringEncodingASCII)) { LOGGER_ERR("org.sbooth.AudioEngine", "CFStringGetCString failed"); continue; } SetAPETag(tag, key, reinterpret_cast<CFStringRef>(values[i])); } } // ReplayGain info SetAPETagDouble(tag, "REPLAYGAIN_REFERENCE_LOUDNESS", metadata.GetReplayGainReferenceLoudness(), CFSTR("%2.1f dB")); SetAPETagDouble(tag, "REPLAYGAIN_TRACK_GAIN", metadata.GetReplayGainTrackGain(), CFSTR("%+2.2f dB")); SetAPETagDouble(tag, "REPLAYGAIN_TRACK_PEAK", metadata.GetReplayGainTrackPeak(), CFSTR("%1.8f")); SetAPETagDouble(tag, "REPLAYGAIN_ALBUM_GAIN", metadata.GetReplayGainAlbumGain(), CFSTR("%+2.2f dB")); SetAPETagDouble(tag, "REPLAYGAIN_ALBUM_PEAK", metadata.GetReplayGainAlbumPeak(), CFSTR("%1.8f")); // Album art if(setAlbumArt) { tag->removeItem("Cover Art (Front)"); tag->removeItem("Cover Art (Back)"); #if 0 tag->removeItem("METADATA_BLOCK_PICTURE"); #endif for(auto attachedPicture : metadata.GetAttachedPictures()) { // APE can handle front and back covers natively if(AttachedPicture::Type::FrontCover == attachedPicture->GetType() || AttachedPicture::Type::FrontCover == attachedPicture->GetType()) { TagLib::ByteVector data; if(attachedPicture->GetDescription()) data.append(TagLib::StringFromCFString(attachedPicture->GetDescription()).data(TagLib::String::UTF8)); data.append('\0'); data.append(TagLib::ByteVector((const char *)CFDataGetBytePtr(attachedPicture->GetData()), (TagLib::uint)CFDataGetLength(attachedPicture->GetData()))); if(AttachedPicture::Type::FrontCover == attachedPicture->GetType()) tag->setData("Cover Art (Front)", data); else if(AttachedPicture::Type::BackCover == attachedPicture->GetType()) tag->setData("Cover Art (Back)", data); } #if 0 else { CGImageSourceRef imageSource = CGImageSourceCreateWithData(attachedPicture->GetData(), nullptr); if(nullptr == imageSource) return false; TagLib::FLAC::Picture picture; picture.setData(TagLib::ByteVector((const char 
*)CFDataGetBytePtr(attachedPicture->GetData()), (TagLib::uint)CFDataGetLength(attachedPicture->GetData()))); picture.setType(static_cast<TagLib::FLAC::Picture::Type>(attachedPicture->GetType())); if(attachedPicture->GetDescription()) picture.setDescription(TagLib::StringFromCFString(attachedPicture->GetDescription())); // Convert the image's UTI into a MIME type CFStringRef mimeType = UTTypeCopyPreferredTagWithClass(CGImageSourceGetType(imageSource), kUTTagClassMIMEType); if(mimeType) { picture.setMimeType(TagLib::StringFromCFString(mimeType)); CFRelease(mimeType), mimeType = nullptr; } // Flesh out the height, width, and depth CFDictionaryRef imagePropertiesDictionary = CGImageSourceCopyPropertiesAtIndex(imageSource, 0, nullptr); if(imagePropertiesDictionary) { CFNumberRef imageWidth = (CFNumberRef)CFDictionaryGetValue(imagePropertiesDictionary, kCGImagePropertyPixelWidth); CFNumberRef imageHeight = (CFNumberRef)CFDictionaryGetValue(imagePropertiesDictionary, kCGImagePropertyPixelHeight); CFNumberRef imageDepth = (CFNumberRef)CFDictionaryGetValue(imagePropertiesDictionary, kCGImagePropertyDepth); int height, width, depth; // Ignore numeric conversion errors CFNumberGetValue(imageWidth, kCFNumberIntType, &width); CFNumberGetValue(imageHeight, kCFNumberIntType, &height); CFNumberGetValue(imageDepth, kCFNumberIntType, &depth); picture.setHeight(height); picture.setWidth(width); picture.setColorDepth(depth); CFRelease(imagePropertiesDictionary), imagePropertiesDictionary = nullptr; } TagLib::ByteVector encodedBlock = TagLib::EncodeBase64(picture.render()); tag->addValue("METADATA_BLOCK_PICTURE", TagLib::String(encodedBlock, TagLib::String::UTF8), false); CFRelease(imageSource), imageSource = nullptr; } #endif } } return true; }
bool SFB::Audio::CoreAudioDecoder::_Open(CFErrorRef *error) { // Open the input file OSStatus result = AudioFileOpenWithCallbacks(this, myAudioFile_ReadProc, nullptr, myAudioFile_GetSizeProc, nullptr, 0, &mAudioFile); if(noErr != result) { LOGGER_CRIT("org.sbooth.AudioEngine.Decoder.CoreAudio", "AudioFileOpenWithCallbacks failed: " << result); if(error) { SFB::CFString description = CFCopyLocalizedString(CFSTR("The format of the file “%@” was not recognized."), ""); SFB::CFString failureReason = CFCopyLocalizedString(CFSTR("File Format Not Recognized"), ""); SFB::CFString recoverySuggestion = CFCopyLocalizedString(CFSTR("The file's extension may not match the file's type."), ""); *error = CreateErrorForURL(Decoder::ErrorDomain, Decoder::InputOutputError, description, mInputSource->GetURL(), failureReason, recoverySuggestion); } return false; } result = ExtAudioFileWrapAudioFileID(mAudioFile, false, &mExtAudioFile); if(noErr != result) { LOGGER_CRIT("org.sbooth.AudioEngine.Decoder.CoreAudio", "ExtAudioFileWrapAudioFileID failed: " << result); if(error) { SFB::CFString description = CFCopyLocalizedString(CFSTR("The format of the file “%@” was not recognized."), ""); SFB::CFString failureReason = CFCopyLocalizedString(CFSTR("File Format Not Recognized"), ""); SFB::CFString recoverySuggestion = CFCopyLocalizedString(CFSTR("The file's extension may not match the file's type."), ""); *error = CreateErrorForURL(Decoder::ErrorDomain, Decoder::InputOutputError, description, mInputSource->GetURL(), failureReason, recoverySuggestion); } result = AudioFileClose(mAudioFile); if(noErr != result) LOGGER_NOTICE("org.sbooth.AudioEngine.Decoder.CoreAudio", "AudioFileClose failed: " << result); mAudioFile = nullptr; return false; } // Query file format UInt32 dataSize = sizeof(mSourceFormat); result = ExtAudioFileGetProperty(mExtAudioFile, kExtAudioFileProperty_FileDataFormat, &dataSize, &mSourceFormat); if(noErr != result) { LOGGER_CRIT("org.sbooth.AudioEngine.Decoder.CoreAudio", 
"ExtAudioFileGetProperty (kExtAudioFileProperty_FileDataFormat) failed: " << result); result = ExtAudioFileDispose(mExtAudioFile); if(noErr != result) LOGGER_NOTICE("org.sbooth.AudioEngine.Decoder.CoreAudio", "ExtAudioFileDispose failed: " << result); result = AudioFileClose(mAudioFile); if(noErr != result) LOGGER_NOTICE("org.sbooth.AudioEngine.Decoder.CoreAudio", "AudioFileClose failed: " << result); mAudioFile = nullptr; mExtAudioFile = nullptr; return false; } // Tell the ExtAudioFile the format in which we'd like our data // For Linear PCM formats, leave the data untouched if(kAudioFormatLinearPCM == mSourceFormat.mFormatID) mFormat = mSourceFormat; // For Apple Lossless, convert to high-aligned signed ints in 32 bits else if(kAudioFormatAppleLossless == mSourceFormat.mFormatID) { mFormat.mFormatID = kAudioFormatLinearPCM; mFormat.mFormatFlags = kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsAlignedHigh; mFormat.mSampleRate = mSourceFormat.mSampleRate; mFormat.mChannelsPerFrame = mSourceFormat.mChannelsPerFrame; if(kAppleLosslessFormatFlag_16BitSourceData == mSourceFormat.mFormatFlags) mFormat.mBitsPerChannel = 16; else if(kAppleLosslessFormatFlag_20BitSourceData == mSourceFormat.mFormatFlags) mFormat.mBitsPerChannel = 20; else if(kAppleLosslessFormatFlag_24BitSourceData == mSourceFormat.mFormatFlags) mFormat.mBitsPerChannel = 24; else if(kAppleLosslessFormatFlag_32BitSourceData == mSourceFormat.mFormatFlags) mFormat.mBitsPerChannel = 32; mFormat.mBytesPerPacket = 4 * mFormat.mChannelsPerFrame; mFormat.mFramesPerPacket = 1; mFormat.mBytesPerFrame = mFormat.mBytesPerPacket * mFormat.mFramesPerPacket; mFormat.mReserved = 0; } // For all other formats convert to the canonical Core Audio format else { mFormat.mFormatID = kAudioFormatLinearPCM; mFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved; mFormat.mSampleRate = mSourceFormat.mSampleRate; mFormat.mChannelsPerFrame = 
mSourceFormat.mChannelsPerFrame; mFormat.mBitsPerChannel = 32; mFormat.mBytesPerPacket = (mFormat.mBitsPerChannel / 8); mFormat.mFramesPerPacket = 1; mFormat.mBytesPerFrame = mFormat.mBytesPerPacket * mFormat.mFramesPerPacket; mFormat.mReserved = 0; } result = ExtAudioFileSetProperty(mExtAudioFile, kExtAudioFileProperty_ClientDataFormat, sizeof(mFormat), &mFormat); if(noErr != result) { LOGGER_CRIT("org.sbooth.AudioEngine.Decoder.CoreAudio", "ExtAudioFileSetProperty (kExtAudioFileProperty_ClientDataFormat) failed: " << result); result = ExtAudioFileDispose(mExtAudioFile); if(noErr != result) LOGGER_NOTICE("org.sbooth.AudioEngine.Decoder.CoreAudio", "ExtAudioFileDispose failed: " << result); result = AudioFileClose(mAudioFile); if(noErr != result) LOGGER_NOTICE("org.sbooth.AudioEngine.Decoder.CoreAudio", "AudioFileClose failed: " << result); mAudioFile = nullptr; mExtAudioFile = nullptr; return false; } // Setup the channel layout // There is a bug in EAF where if the underlying AF doesn't return a channel layout it returns an empty struct // result = ExtAudioFileGetPropertyInfo(mExtAudioFile, kExtAudioFileProperty_FileChannelLayout, &dataSize, nullptr); result = AudioFileGetPropertyInfo(mAudioFile, kAudioFilePropertyChannelLayout, &dataSize, nullptr); if(noErr == result) { auto channelLayout = (AudioChannelLayout *)malloc(dataSize); // result = ExtAudioFileGetProperty(mExtAudioFile, kExtAudioFileProperty_FileChannelLayout, &dataSize, mChannelLayout); result = AudioFileGetProperty(mAudioFile, kAudioFilePropertyChannelLayout, &dataSize, channelLayout); if(noErr != result) { // LOGGER_ERR("org.sbooth.AudioEngine.Decoder.CoreAudio", "ExtAudioFileGetProperty (kExtAudioFileProperty_FileChannelLayout) failed: " << result); LOGGER_ERR("org.sbooth.AudioEngine.Decoder.CoreAudio", "AudioFileGetProperty (kAudioFilePropertyChannelLayout) failed: " << result); free(channelLayout); result = ExtAudioFileDispose(mExtAudioFile); if(noErr != result) 
LOGGER_NOTICE("org.sbooth.AudioEngine.Decoder.CoreAudio", "ExtAudioFileDispose failed: " << result); result = AudioFileClose(mAudioFile); if(noErr != result) LOGGER_NOTICE("org.sbooth.AudioEngine.Decoder.CoreAudio", "AudioFileClose failed: " << result); mAudioFile = nullptr; mExtAudioFile = nullptr; return false; } mChannelLayout = channelLayout; free(channelLayout); } else // LOGGER_ERR("org.sbooth.AudioEngine.Decoder.CoreAudio", "ExtAudioFileGetPropertyInfo (kExtAudioFileProperty_FileChannelLayout) failed: " << result); LOGGER_ERR("org.sbooth.AudioEngine.Decoder.CoreAudio", "AudioFileGetPropertyInfo (kAudioFilePropertyChannelLayout) failed: " << result); // Work around bugs in ExtAudioFile: http://lists.apple.com/archives/coreaudio-api/2009/Nov/msg00119.html // Synopsis: ExtAudioFileTell() and ExtAudioFileSeek() are broken for m4a files AudioFileID audioFile; dataSize = sizeof(audioFile); result = ExtAudioFileGetProperty(mExtAudioFile, kExtAudioFileProperty_AudioFile, &dataSize, &audioFile); if(noErr != result) { LOGGER_ERR("org.sbooth.AudioEngine.Decoder.CoreAudio", "ExtAudioFileGetProperty (kExtAudioFileProperty_AudioFile) failed: " << result); result = ExtAudioFileDispose(mExtAudioFile); if(noErr != result) LOGGER_NOTICE("org.sbooth.AudioEngine.Decoder.CoreAudio", "ExtAudioFileDispose failed: " << result); result = AudioFileClose(mAudioFile); if(noErr != result) LOGGER_NOTICE("org.sbooth.AudioEngine.Decoder.CoreAudio", "AudioFileClose failed: " << result); mAudioFile = nullptr; mExtAudioFile = nullptr; return false; } AudioFileTypeID fileFormat; dataSize = sizeof(fileFormat); result = AudioFileGetProperty(audioFile, kAudioFilePropertyFileFormat, &dataSize, &fileFormat); if(noErr != result) { LOGGER_ERR("org.sbooth.AudioEngine.Decoder.CoreAudio", "AudioFileGetProperty (kAudioFilePropertyFileFormat) failed: " << result); result = ExtAudioFileDispose(mExtAudioFile); if(noErr != result) LOGGER_NOTICE("org.sbooth.AudioEngine.Decoder.CoreAudio", "ExtAudioFileDispose 
failed: " << result); result = AudioFileClose(mAudioFile); if(noErr != result) LOGGER_NOTICE("org.sbooth.AudioEngine.Decoder.CoreAudio", "AudioFileClose failed: " << result); mAudioFile = nullptr; mExtAudioFile = nullptr; return false; } if(kAudioFileM4AType == fileFormat || kAudioFileMPEG4Type == fileFormat || kAudioFileAAC_ADTSType == fileFormat) mUseM4AWorkarounds = true; #if 0 // This was supposed to determine if ExtAudioFile had been fixed, but even though // it passes on 10.6.2 things are not behaving properly SInt64 currentFrame = -1; result = ExtAudioFileTell(mExtAudioFile, ¤tFrame); if(noErr != result) { LOGGER_ERR("org.sbooth.AudioEngine.Decoder.CoreAudio", "ExtAudioFileTell failed: " << result); result = ExtAudioFileDispose(mExtAudioFile); if(noErr != result) LOGGER_NOTICE("org.sbooth.AudioEngine.Decoder.CoreAudio", "ExtAudioFileDispose failed: " << result); result = AudioFileClose(mAudioFile); if(noErr != result) LOGGER_NOTICE("org.sbooth.AudioEngine.Decoder.CoreAudio", "AudioFileClose failed: " << result); mAudioFile = nullptr; mExtAudioFile = nullptr; return false; } if(0 > currentFrame) mUseM4AWorkarounds = true; #endif return true; }
bool FLACMetadata::WriteMetadata(CFErrorRef *error) { UInt8 buf [PATH_MAX]; if(!CFURLGetFileSystemRepresentation(mURL, false, buf, PATH_MAX)) return false; auto stream = new TagLib::FileStream(reinterpret_cast<const char *>(buf)); TagLib::FLAC::File file(stream, TagLib::ID3v2::FrameFactory::instance(), false); if(!file.isValid()) { if(error) { CFStringRef description = CFCopyLocalizedString(CFSTR("The file “%@” is not a valid FLAC file."), ""); CFStringRef failureReason = CFCopyLocalizedString(CFSTR("Not a FLAC file"), ""); CFStringRef recoverySuggestion = CFCopyLocalizedString(CFSTR("The file's extension may not match the file's type."), ""); *error = CreateErrorForURL(AudioMetadataErrorDomain, AudioMetadataInputOutputError, description, mURL, failureReason, recoverySuggestion); CFRelease(description), description = nullptr; CFRelease(failureReason), failureReason = nullptr; CFRelease(recoverySuggestion), recoverySuggestion = nullptr; } return false; } SetXiphCommentFromMetadata(*this, file.xiphComment(), false); // Remove existing cover art file.removePictures(); // Add album art for(auto attachedPicture : GetAttachedPictures()) { CGImageSourceRef imageSource = CGImageSourceCreateWithData(attachedPicture->GetData(), nullptr); if(nullptr == imageSource) { LOGGER_ERR("org.sbooth.AudioEngine.AudioMetadata.FLAC", "Skipping album art (unable to create image)"); continue; } TagLib::FLAC::Picture *picture = new TagLib::FLAC::Picture; picture->setData(TagLib::ByteVector((const char *)CFDataGetBytePtr(attachedPicture->GetData()), (TagLib::uint)CFDataGetLength(attachedPicture->GetData()))); picture->setType(static_cast<TagLib::FLAC::Picture::Type>(attachedPicture->GetType())); if(attachedPicture->GetDescription()) picture->setDescription(TagLib::StringFromCFString(attachedPicture->GetDescription())); // Convert the image's UTI into a MIME type CFStringRef mimeType = UTTypeCopyPreferredTagWithClass(CGImageSourceGetType(imageSource), kUTTagClassMIMEType); if(mimeType) { 
picture->setMimeType(TagLib::StringFromCFString(mimeType)); CFRelease(mimeType), mimeType = nullptr; } // Flesh out the height, width, and depth CFDictionaryRef imagePropertiesDictionary = CGImageSourceCopyPropertiesAtIndex(imageSource, 0, nullptr); if(imagePropertiesDictionary) { CFNumberRef imageWidth = (CFNumberRef)CFDictionaryGetValue(imagePropertiesDictionary, kCGImagePropertyPixelWidth); CFNumberRef imageHeight = (CFNumberRef)CFDictionaryGetValue(imagePropertiesDictionary, kCGImagePropertyPixelHeight); CFNumberRef imageDepth = (CFNumberRef)CFDictionaryGetValue(imagePropertiesDictionary, kCGImagePropertyDepth); int height, width, depth; // Ignore numeric conversion errors CFNumberGetValue(imageWidth, kCFNumberIntType, &width); CFNumberGetValue(imageHeight, kCFNumberIntType, &height); CFNumberGetValue(imageDepth, kCFNumberIntType, &depth); picture->setHeight(height); picture->setWidth(width); picture->setColorDepth(depth); CFRelease(imagePropertiesDictionary), imagePropertiesDictionary = nullptr; } file.addPicture(picture); CFRelease(imageSource), imageSource = nullptr; } if(!file.save()) { if(error) { CFStringRef description = CFCopyLocalizedString(CFSTR("The file “%@” is not a valid FLAC file."), ""); CFStringRef failureReason = CFCopyLocalizedString(CFSTR("Unable to write metadata"), ""); CFStringRef recoverySuggestion = CFCopyLocalizedString(CFSTR("The file's extension may not match the file's type."), ""); *error = CreateErrorForURL(AudioMetadataErrorDomain, AudioMetadataInputOutputError, description, mURL, failureReason, recoverySuggestion); CFRelease(description), description = nullptr; CFRelease(failureReason), failureReason = nullptr; CFRelease(recoverySuggestion), recoverySuggestion = nullptr; } return false; } MergeChangedMetadataIntoMetadata(); return true; }
// Reads up to frameCount frames from a looping region of the wrapped decoder,
// crossing the loop boundary (seeking back to mStartingFrame) as many times
// as the repeat count allows.  Returns the number of frames delivered.
UInt32 SFB::Audio::LoopableRegionDecoder::_ReadAudio(AudioBufferList *bufferList, UInt32 frameCount)
{
	// If the repeat count is N then (N + 1) passes must be completed to read all the frames
	if((1 + mRepeatCount) == mCompletedPasses) {
		for(UInt32 bufferIndex = 0; bufferIndex < bufferList->mNumberBuffers; ++bufferIndex)
			bufferList->mBuffers[bufferIndex].mDataByteSize = 0;
		return 0;
	}

	// Allocate an alias to the buffer list, which will contain pointers to the current write position in the output buffer
	AudioBufferList *bufferListAlias = (AudioBufferList *)alloca(offsetof(AudioBufferList, mBuffers) + (sizeof(AudioBuffer) * bufferList->mNumberBuffers));

	if(nullptr == bufferListAlias) {
		LOGGER_ERR("org.sbooth.AudioEngine.Decoder.LoopableRegion", "Unable to allocate memory");
		return 0;
	}

	// NOTE(review): capacity is taken from buffer 0 only and reused for every
	// buffer — this appears to assume all buffers have equal capacity; confirm
	UInt32 initialBufferCapacityBytes = bufferList->mBuffers[0].mDataByteSize;
	bufferListAlias->mNumberBuffers = bufferList->mNumberBuffers;

	// Initially the buffer list alias points to the beginning and contains no data
	for(UInt32 i = 0; i < bufferListAlias->mNumberBuffers; ++i) {
		bufferListAlias->mBuffers[i].mData = bufferList->mBuffers[i].mData;
		bufferListAlias->mBuffers[i].mDataByteSize = bufferList->mBuffers[i].mDataByteSize;
		bufferListAlias->mBuffers[i].mNumberChannels = bufferList->mBuffers[i].mNumberChannels;

		// The output tracks how many bytes have actually been delivered
		bufferList->mBuffers[i].mDataByteSize = 0;
	}

	UInt32 framesRemaining = frameCount;
	UInt32 totalFramesRead = 0;

	while(0 < framesRemaining) {
		// Clamp each read to the end of the region so the loop point is honored
		UInt32 framesRemainingInCurrentPass = (UInt32)(mStartingFrame + mFrameCount - mDecoder->GetCurrentFrame());
		UInt32 framesToRead = std::min(framesRemaining, framesRemainingInCurrentPass);

		// Nothing left to read
		if(0 == framesToRead)
			break;

		UInt32 framesRead = mDecoder->ReadAudio(bufferListAlias, framesToRead);

		// A read error occurred
		if(0 == framesRead)
			break;

		// Advance the write pointers and update the capacity
		for(UInt32 i = 0; i < bufferListAlias->mNumberBuffers; ++i) {
			int8_t *buf = (int8_t *)bufferListAlias->mBuffers[i].mData;
			bufferListAlias->mBuffers[i].mData = (void *)(buf + (framesRead * mFormat.mBytesPerFrame));

			// Accumulate delivered bytes in the caller's list; the alias's
			// capacity shrinks by the same amount
			bufferList->mBuffers[i].mDataByteSize += bufferListAlias->mBuffers[i].mDataByteSize;

			bufferListAlias->mBuffers[i].mDataByteSize = initialBufferCapacityBytes - bufferList->mBuffers[i].mDataByteSize;
		}

		// Housekeeping
		mFramesReadInCurrentPass += framesRead;
		mTotalFramesRead += framesRead;

		totalFramesRead += framesRead;
		framesRemaining -= framesRead;

		// If this pass is finished, seek to the beginning of the region in preparation for the next read
		if(mFrameCount == mFramesReadInCurrentPass) {
			++mCompletedPasses;
			mFramesReadInCurrentPass = 0;

			// Only seek to the beginning of the region if more passes remain
			if(mRepeatCount >= mCompletedPasses)
				mDecoder->SeekToFrame(mStartingFrame);
		}
	}

	return totalFramesRead;
}
// Reads up to frameCount frames of decoded Musepack audio into bufferList
// (one non-interleaved buffer per channel).  Decoded data is staged in
// mBufferList and drained into the output; when the stage runs dry another
// MPC frame is decoded, clipped, and deinterleaved into it.
// Returns the number of frames delivered.
UInt32 SFB::Audio::MusepackDecoder::_ReadAudio(AudioBufferList *bufferList, UInt32 frameCount)
{
	if(bufferList->mNumberBuffers != mFormat.mChannelsPerFrame) {
		LOGGER_WARNING("org.sbooth.AudioEngine.Decoder.Musepack", "_ReadAudio() called with invalid parameters");
		return 0;
	}

	// Scratch buffer for one interleaved MPC frame
	MPC_SAMPLE_FORMAT buffer [MPC_DECODER_BUFFER_LENGTH];
	UInt32 framesRead = 0;

	// Reset output buffer data size
	for(UInt32 i = 0; i < bufferList->mNumberBuffers; ++i)
		bufferList->mBuffers[i].mDataByteSize = 0;

	for(;;) {
		UInt32 framesRemaining = frameCount - framesRead;
		// Write offset into the output (frames already delivered this call)
		UInt32 framesToSkip = (UInt32)(bufferList->mBuffers[0].mDataByteSize / sizeof(float));
		// Frames currently staged in mBufferList
		UInt32 framesInBuffer = (UInt32)(mBufferList->mBuffers[0].mDataByteSize / sizeof(float));
		UInt32 framesToCopy = std::min(framesInBuffer, framesRemaining);

		// Copy data from the buffer to output
		for(UInt32 i = 0; i < mBufferList->mNumberBuffers; ++i) {
			float *floatBuffer = (float *)bufferList->mBuffers[i].mData;
			memcpy(floatBuffer + framesToSkip, mBufferList->mBuffers[i].mData, framesToCopy * sizeof(float));
			bufferList->mBuffers[i].mDataByteSize += framesToCopy * sizeof(float);

			// Move remaining data in buffer to beginning
			if(framesToCopy != framesInBuffer) {
				floatBuffer = (float *)mBufferList->mBuffers[i].mData;
				memmove(floatBuffer, floatBuffer + framesToCopy, (framesInBuffer - framesToCopy) * sizeof(float));
			}

			mBufferList->mBuffers[i].mDataByteSize -= framesToCopy * sizeof(float);
		}

		framesRead += framesToCopy;

		// All requested frames were read
		if(framesRead == frameCount)
			break;

		// Decode one frame of MPC data
		mpc_frame_info frame;
		frame.buffer = buffer;

		mpc_status result = mpc_demux_decode(mDemux, &frame);
		if(MPC_STATUS_OK != result) {
			LOGGER_ERR("org.sbooth.AudioEngine.Decoder.Musepack", "Musepack decoding error");
			break;
		}

		// End of input
		if(-1 == frame.bits)
			break;

#ifdef MPC_FIXED_POINT
#error "Fixed point not yet supported"
#else
		float *inputBuffer = (float *)buffer;

		// Clip the samples to [-1, 1)
		// (upper bound is the largest value representable in signed 24 bits)
		float minValue = -1.f;
		float maxValue = 8388607.f / 8388608.f;

		vDSP_vclip(inputBuffer, 1, &minValue, &maxValue, inputBuffer, 1, frame.samples * mFormat.mChannelsPerFrame);

		// Deinterleave the normalized samples
		for(UInt32 channel = 0; channel < mFormat.mChannelsPerFrame; ++channel) {
			float *floatBuffer = (float *)mBufferList->mBuffers[channel].mData;

			for(UInt32 sample = channel; sample < frame.samples * mFormat.mChannelsPerFrame; sample += mFormat.mChannelsPerFrame)
				*floatBuffer++ = inputBuffer[sample];

			mBufferList->mBuffers[channel].mNumberChannels = 1;
			mBufferList->mBuffers[channel].mDataByteSize = frame.samples * sizeof(float);
		}
#endif /* MPC_FIXED_POINT */
	}

	mCurrentFrame += framesRead;

	return framesRead;
}
bool SFB::Audio::DSFDecoder::_Open(CFErrorRef *error) { // Read the 'DSD ' chunk uint32_t chunkID; if(!ReadChunkID(GetInputSource(), chunkID) || 'DSD ' != chunkID) { LOGGER_ERR("org.sbooth.AudioEngine.Decoder.DSF", "Unable to read 'DSD ' chunk"); return false; } uint64_t chunkSize, fileSize, metadataOffset; // Unlike normal IFF, the chunkSize includes the size of the chunk ID and size if(!GetInputSource().ReadLE<uint64_t>(chunkSize) || 28 != chunkSize) { LOGGER_ERR("org.sbooth.AudioEngine.Decoder.DSF", "Unexpected 'DSD ' chunk size: " << chunkSize); return false; } if(!GetInputSource().ReadLE<uint64_t>(fileSize)) { LOGGER_ERR("org.sbooth.AudioEngine.Decoder.DSF", "Unable to read file size in 'DSD ' chunk"); return false; } if(!GetInputSource().ReadLE<uint64_t>(metadataOffset)) { LOGGER_ERR("org.sbooth.AudioEngine.Decoder.DSF", "Unable to read metadata offset in 'DSD ' chunk"); return false; } // Read the 'fmt ' chunk if(!ReadChunkID(GetInputSource(), chunkID) || 'fmt ' != chunkID) { LOGGER_ERR("org.sbooth.AudioEngine.Decoder.DSF", "Unable to read 'fmt ' chunk"); return false; } if(!GetInputSource().ReadLE<uint64_t>(chunkSize)) { LOGGER_ERR("org.sbooth.AudioEngine.Decoder.DSF", "Unexpected 'fmt ' chunk size: " << chunkSize); return false; } uint32_t formatVersion, formatID, channelType, channelNum, samplingFrequency, bitsPerSample; uint64_t sampleCount; uint32_t blockSizePerChannel, reserved; if(!GetInputSource().ReadLE<uint32_t>(formatVersion) || 1 != formatVersion) { LOGGER_ERR("org.sbooth.AudioEngine.Decoder.DSF", "Unexpected format version in 'fmt ': " << formatVersion); return false; } if(!GetInputSource().ReadLE<uint32_t>(formatID) || 0 != formatID) { LOGGER_ERR("org.sbooth.AudioEngine.Decoder.DSF", "Unexpected format ID in 'fmt ': " << formatID); return false; } if(!GetInputSource().ReadLE<uint32_t>(channelType) || (1 > channelType && 7 < channelType)) { LOGGER_ERR("org.sbooth.AudioEngine.Decoder.DSF", "Unexpected channel type in 'fmt ': " << channelType); 
return false; } if(!GetInputSource().ReadLE<uint32_t>(channelNum) || (1 > channelNum && 6 < channelNum)) { LOGGER_ERR("org.sbooth.AudioEngine.Decoder.DSF", "Unexpected channel count in 'fmt ': " << channelNum); return false; } if(!GetInputSource().ReadLE<uint32_t>(samplingFrequency) || (2822400 != samplingFrequency && 5644800 != samplingFrequency)) { LOGGER_ERR("org.sbooth.AudioEngine.Decoder.DSF", "Unexpected sample rate in 'fmt ': " << samplingFrequency); return false; } if(!GetInputSource().ReadLE<uint32_t>(bitsPerSample) || (1 != bitsPerSample && 8 != bitsPerSample)) { LOGGER_ERR("org.sbooth.AudioEngine.Decoder.DSF", "Unexpected bits per sample in 'fmt ': " << bitsPerSample); return false; } if(!GetInputSource().ReadLE<uint64_t>(sampleCount) ) { LOGGER_ERR("org.sbooth.AudioEngine.Decoder.DSF", "Unable to read sample count in 'fmt ' chunk"); return false; } if(!GetInputSource().ReadLE<uint32_t>(blockSizePerChannel) || 4096 != blockSizePerChannel) { LOGGER_ERR("org.sbooth.AudioEngine.Decoder.DSF", "Unexpected block size per channel in 'fmt ': " << blockSizePerChannel); return false; } if(!GetInputSource().ReadLE<uint32_t>(reserved) || 0 != reserved) { LOGGER_ERR("org.sbooth.AudioEngine.Decoder.DSF", "Unexpected non-zero value for reserved in 'fmt ': " << reserved); return false; } // Read the 'data' chunk if(!ReadChunkID(GetInputSource(), chunkID) || 'data' != chunkID) { LOGGER_ERR("org.sbooth.AudioEngine.Decoder.DSF", "Unable to read 'data' chunk"); return false; } if(!GetInputSource().ReadLE<uint64_t>(chunkSize) ) { LOGGER_ERR("org.sbooth.AudioEngine.Decoder.DSF", "Unexpected 'data' chunk size: " << chunkSize); return false; } mBlockByteSizePerChannel = blockSizePerChannel; mAudioOffset = GetInputSource().GetOffset(); mTotalFrames = (SInt64)sampleCount; // Set up the source format mSourceFormat.mFormatID = kAudioFormatDirectStreamDigital; mSourceFormat.mSampleRate = (Float64)samplingFrequency; mSourceFormat.mChannelsPerFrame = (UInt32)channelNum; // The output 
format is raw DSD mFormat.mFormatID = kAudioFormatDirectStreamDigital; mFormat.mFormatFlags = kAudioFormatFlagIsNonInterleaved | (8 == bitsPerSample ? kAudioFormatFlagIsBigEndian : 0); mFormat.mSampleRate = (Float64)samplingFrequency; mFormat.mChannelsPerFrame = (UInt32)channelNum; mFormat.mBitsPerChannel = 1; mFormat.mBytesPerPacket = 1; mFormat.mFramesPerPacket = 8; mFormat.mBytesPerFrame = 0; mFormat.mReserved = 0; // Channel layouts are defined in the DSF file format specification switch(channelType) { case 1: mChannelLayout = ChannelLayout::ChannelLayoutWithTag(kAudioChannelLayoutTag_Mono); break; case 2: mChannelLayout = ChannelLayout::ChannelLayoutWithTag(kAudioChannelLayoutTag_Stereo); break; case 3: mChannelLayout = ChannelLayout::ChannelLayoutWithTag(kAudioChannelLayoutTag_MPEG_3_0_A); break; case 4: mChannelLayout = ChannelLayout::ChannelLayoutWithTag(kAudioChannelLayoutTag_Quadraphonic); break; case 5: mChannelLayout = ChannelLayout::ChannelLayoutWithTag(kAudioChannelLayoutTag_ITU_2_2); break; case 6: mChannelLayout = ChannelLayout::ChannelLayoutWithTag(kAudioChannelLayoutTag_MPEG_5_0_A); break; case 7: mChannelLayout = ChannelLayout::ChannelLayoutWithTag(kAudioChannelLayoutTag_MPEG_5_1_A); break; } // Metadata chunk is ignored // Allocate buffers mBufferList.Allocate(mFormat, (UInt32)mFormat.ByteCountToFrameCount(mBlockByteSizePerChannel)); for(UInt32 i = 0; i < mBufferList->mNumberBuffers; ++i) mBufferList->mBuffers[i].mDataByteSize = 0; return true; }
// Reads up to frameCount frames of decoded Speex audio into bufferList (deinterleaved
// float, one buffer per channel), returning the number of frames produced.
// Structure: an outer loop alternates between draining the internal staging buffer
// (mBufferList) into the output and pulling/decoding Ogg packets; a nested loop pair
// extracts packets from the current Ogg page and fetches new pages from the input
// source when the packet supply runs dry.
UInt32 OggSpeexDecoder::ReadAudio(AudioBufferList *bufferList, UInt32 frameCount)
{
	// Sanity checks: decoder open, valid output ABL with one buffer per channel
	if(!IsOpen() || NULL == bufferList || bufferList->mNumberBuffers != mFormat.mChannelsPerFrame || 0 == frameCount)
		return 0;

	UInt32 framesRead = 0;

	// Reset output buffer data size
	for(UInt32 i = 0; i < bufferList->mNumberBuffers; ++i)
		bufferList->mBuffers[i].mDataByteSize = 0;

	for(;;) {
		UInt32 framesRemaining = frameCount - framesRead;
		// framesToSkip: frames already written to the output on a previous pass
		UInt32 framesToSkip = static_cast<UInt32>(bufferList->mBuffers[0].mDataByteSize / sizeof(float));
		// framesInBuffer: decoded frames staged in mBufferList awaiting delivery
		UInt32 framesInBuffer = static_cast<UInt32>(mBufferList->mBuffers[0].mDataByteSize / sizeof(float));
		UInt32 framesToCopy = std::min(framesInBuffer, framesRemaining);

		// Copy data from the buffer to output
		for(UInt32 i = 0; i < mBufferList->mNumberBuffers; ++i) {
			float *floatBuffer = static_cast<float *>(bufferList->mBuffers[i].mData);
			memcpy(floatBuffer + framesToSkip, mBufferList->mBuffers[i].mData, framesToCopy * sizeof(float));
			bufferList->mBuffers[i].mDataByteSize += static_cast<UInt32>(framesToCopy * sizeof(float));

			// Move remaining data in buffer to beginning
			if(framesToCopy != framesInBuffer) {
				floatBuffer = static_cast<float *>(mBufferList->mBuffers[i].mData);
				memmove(floatBuffer, floatBuffer + framesToCopy, (framesInBuffer - framesToCopy) * sizeof(float));
			}

			mBufferList->mBuffers[i].mDataByteSize -= static_cast<UInt32>(framesToCopy * sizeof(float));
		}

		framesRead += framesToCopy;

		// All requested frames were read
		if(framesRead == frameCount)
			break;

		// EOS reached
		if(mSpeexEOSReached)
			break;

		// Attempt to process the desired number of packets
		unsigned packetsDesired = 1;
		while(0 < packetsDesired && !mSpeexEOSReached) {

			// Process any packets in the current page
			while(0 < packetsDesired && !mSpeexEOSReached) {

				// Grab a packet from the streaming layer
				ogg_packet oggPacket;
				int result = ogg_stream_packetout(&mOggStreamState, &oggPacket);
				if(-1 == result) {
					LOGGER_ERR("org.sbooth.AudioEngine.AudioDecoder.OggSpeex", "Ogg Speex decoding error: Ogg loss of streaming");
					break;
				}

				// If result is 0, there is insufficient data to assemble a packet
				if(0 == result)
					break;

				// Otherwise, we got a valid packet for processing
				if(1 == result) {
					// A packet starting with "Speex" marks the Speex stream; latch its serial number
					if(5 <= oggPacket.bytes && !memcmp(oggPacket.packet, "Speex", 5))
						mSpeexSerialNumber = mOggStreamState.serialno;

					// Skip packets that don't belong to the latched Speex stream
					if(-1 == mSpeexSerialNumber || mOggStreamState.serialno != mSpeexSerialNumber)
						break;

					// Ignore the following:
					//  - Speex comments in packet #2
					//  - Extra headers (optionally) in packets 3+
					if(1 != mOggPacketCount && 1 + mExtraSpeexHeaderCount <= mOggPacketCount) {
						// Detect Speex EOS
						if(oggPacket.e_o_s && mOggStreamState.serialno == mSpeexSerialNumber)
							mSpeexEOSReached = true;

						// SPEEX_GET_FRAME_SIZE is in samples
						spx_int32_t speexFrameSize;
						speex_decoder_ctl(mSpeexDecoder, SPEEX_GET_FRAME_SIZE, &speexFrameSize);
						// VLA scratch buffer; stereo decoding needs room for both channels
						float buffer [(2 == mFormat.mChannelsPerFrame) ? 2 * speexFrameSize : speexFrameSize];

						// Copy the Ogg packet to the Speex bitstream
						speex_bits_read_from(&mSpeexBits, (char *)oggPacket.packet, static_cast<int>(oggPacket.bytes));

						// Decode each frame in the Speex packet
						for(spx_int32_t i = 0; i < mSpeexFramesPerOggPacket; ++i) {
							result = speex_decode(mSpeexDecoder, &mSpeexBits, buffer);

							// -1 indicates EOS
							if(-1 == result)
								break;
							else if(-2 == result) {
								LOGGER_ERR("org.sbooth.AudioEngine.AudioDecoder.OggSpeex", "Ogg Speex decoding error: possible corrupted stream");
								break;
							}

							if(0 > speex_bits_remaining(&mSpeexBits)) {
								LOGGER_ERR("org.sbooth.AudioEngine.AudioDecoder.OggSpeex", "Ogg Speex decoding overflow: possible corrupted stream");
								break;
							}

							// Normalize the values from 16-bit integer range to [-1, 1]
							float maxSampleValue = 1u << 15;
							vDSP_vsdiv(buffer, 1, &maxSampleValue, buffer, 1, speexFrameSize);

							// Copy the frames from the decoding buffer to the output buffer, skipping over any frames already decoded
							framesInBuffer = static_cast<UInt32>(mBufferList->mBuffers[0].mDataByteSize / sizeof(float));
							memcpy(static_cast<float *>(mBufferList->mBuffers[0].mData) + framesInBuffer, buffer, speexFrameSize * sizeof(float));
							mBufferList->mBuffers[0].mDataByteSize += static_cast<UInt32>(speexFrameSize * sizeof(float));

							// Process stereo channel, if present
							if(2 == mFormat.mChannelsPerFrame) {
								speex_decode_stereo(buffer, speexFrameSize, mSpeexStereoState);
								vDSP_vsdiv(buffer + speexFrameSize, 1, &maxSampleValue, buffer + speexFrameSize, 1, speexFrameSize);
								memcpy(static_cast<float *>(mBufferList->mBuffers[1].mData) + framesInBuffer, buffer + speexFrameSize, speexFrameSize * sizeof(float));
								mBufferList->mBuffers[1].mDataByteSize += static_cast<UInt32>(speexFrameSize * sizeof(float));
							}

							// Packet processing finished
							--packetsDesired;
						}
					}

					++mOggPacketCount;
				}
			}

			// Grab a new Ogg page for processing, if necessary
			if(!mSpeexEOSReached && 0 < packetsDesired) {
				while(1 != ogg_sync_pageout(&mOggSyncState, &mOggPage)) {
					// Get the ogg buffer for writing
					char *data = ogg_sync_buffer(&mOggSyncState, READ_SIZE_BYTES);

					// Read bitstream from input file
					ssize_t bytesRead = GetInputSource()->Read(data, READ_SIZE_BYTES);
					if(-1 == bytesRead) {
						LOGGER_ERR("org.sbooth.AudioEngine.AudioDecoder.OggSpeex", "Unable to read from the input file");
						break;
					}

					ogg_sync_wrote(&mOggSyncState, bytesRead);

					// No more data available from input file
					if(0 == bytesRead)
						break;
				}

				// Ensure all Ogg streams are read
				if(ogg_page_serialno(&mOggPage) != mOggStreamState.serialno)
					ogg_stream_reset_serialno(&mOggStreamState, ogg_page_serialno(&mOggPage));

				// Get the resultant Ogg page
				int result = ogg_stream_pagein(&mOggStreamState, &mOggPage);
				if(0 != result) {
					LOGGER_ERR("org.sbooth.AudioEngine.AudioDecoder.OggSpeex", "Error reading Ogg page");
					break;
				}
			}
		}
	}

	mCurrentFrame += framesRead;

	// A short read at EOS pins the total frame count to what was actually decoded
	if(0 == framesRead && mSpeexEOSReached)
		mTotalFrames = mCurrentFrame;

	return framesRead;
}
bool SFB::Audio::LibsndfileDecoder::_Open(CFErrorRef *error) { // Set up the virtual IO function pointers SF_VIRTUAL_IO virtualIO; virtualIO.get_filelen = my_sf_vio_get_filelen; virtualIO.seek = my_sf_vio_seek; virtualIO.read = my_sf_vio_read; virtualIO.write = nullptr; virtualIO.tell = my_sf_vio_tell; // Open the input file mFile = unique_SNDFILE_ptr(sf_open_virtual(&virtualIO, SFM_READ, &mFileInfo, this), sf_close); if(!mFile) { LOGGER_ERR("org.sbooth.AudioEngine.Decoder.Libsndfile", "sf_open_virtual failed: " << sf_error_number(sf_error(nullptr))); if(nullptr != error) { SFB::CFString description(CFCopyLocalizedString(CFSTR("The format of the file “%@” was not recognized."), "")); SFB::CFString failureReason(CFCopyLocalizedString(CFSTR("File Format Not Recognized"), "")); SFB::CFString recoverySuggestion(CFCopyLocalizedString(CFSTR("The file's extension may not match the file's type."), "")); *error = CreateErrorForURL(Decoder::ErrorDomain, Decoder::InputOutputError, description, mInputSource->GetURL(), failureReason, recoverySuggestion); } return false; } // Generate interleaved PCM output mFormat.mFormatID = kAudioFormatLinearPCM; mFormat.mSampleRate = mFileInfo.samplerate; mFormat.mChannelsPerFrame = (UInt32)mFileInfo.channels; int subFormat = SF_FORMAT_SUBMASK & mFileInfo.format; // 8-bit PCM will be high-aligned in shorts if(SF_FORMAT_PCM_U8 == subFormat) { mFormat.mFormatFlags = kAudioFormatFlagIsAlignedHigh; mFormat.mBitsPerChannel = 8; mFormat.mBytesPerPacket = sizeof(short) * mFormat.mChannelsPerFrame; mFormat.mFramesPerPacket = 1; mFormat.mBytesPerFrame = mFormat.mBytesPerPacket * mFormat.mFramesPerPacket; mReadMethod = ReadMethod::Short; } else if(SF_FORMAT_PCM_S8 == subFormat) { mFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsAlignedHigh; mFormat.mBitsPerChannel = 8; mFormat.mBytesPerPacket = sizeof(short) * mFormat.mChannelsPerFrame; mFormat.mFramesPerPacket = 1; mFormat.mBytesPerFrame = mFormat.mBytesPerPacket * 
mFormat.mFramesPerPacket; mReadMethod = ReadMethod::Short; } // 16-bit PCM else if(SF_FORMAT_PCM_16 == subFormat) { mFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked; mFormat.mBitsPerChannel = 16; mFormat.mBytesPerPacket = (mFormat.mBitsPerChannel / 8) * mFormat.mChannelsPerFrame; mFormat.mFramesPerPacket = 1; mFormat.mBytesPerFrame = mFormat.mBytesPerPacket * mFormat.mFramesPerPacket; mReadMethod = ReadMethod::Short; } // 24-bit PCM will be high-aligned in ints else if(SF_FORMAT_PCM_24 == subFormat) { mFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsAlignedHigh; mFormat.mBitsPerChannel = 24; mFormat.mBytesPerPacket = sizeof(int) * mFormat.mChannelsPerFrame; mFormat.mFramesPerPacket = 1; mFormat.mBytesPerFrame = mFormat.mBytesPerPacket * mFormat.mFramesPerPacket; mReadMethod = ReadMethod::Int; } // 32-bit PCM else if(SF_FORMAT_PCM_32 == subFormat) { mFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked; mFormat.mBitsPerChannel = 32; mFormat.mBytesPerPacket = (mFormat.mBitsPerChannel / 8) * mFormat.mChannelsPerFrame; mFormat.mFramesPerPacket = 1; mFormat.mBytesPerFrame = mFormat.mBytesPerPacket * mFormat.mFramesPerPacket; mReadMethod = ReadMethod::Int; } // Floating point formats else if(SF_FORMAT_FLOAT == subFormat) { mFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked; mFormat.mBitsPerChannel = 8 * sizeof(float); mFormat.mBytesPerPacket = (mFormat.mBitsPerChannel / 8) * mFormat.mChannelsPerFrame; mFormat.mFramesPerPacket = 1; mFormat.mBytesPerFrame = mFormat.mBytesPerPacket * mFormat.mFramesPerPacket; mReadMethod = ReadMethod::Float; } else if(SF_FORMAT_DOUBLE == subFormat) { mFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked; mFormat.mBitsPerChannel = 8 * sizeof(double); mFormat.mBytesPerPacket = (mFormat.mBitsPerChannel / 8) * mFormat.mChannelsPerFrame; mFormat.mFramesPerPacket = 1; mFormat.mBytesPerFrame = mFormat.mBytesPerPacket * mFormat.mFramesPerPacket; 
mReadMethod = ReadMethod::Double; } // Everything else will be converted to 32-bit float else { mFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked; mFormat.mBitsPerChannel = 8 * sizeof(float); mFormat.mBytesPerPacket = (mFormat.mBitsPerChannel / 8) * mFormat.mChannelsPerFrame; mFormat.mFramesPerPacket = 1; mFormat.mBytesPerFrame = mFormat.mBytesPerPacket * mFormat.mFramesPerPacket; mReadMethod = ReadMethod::Float; } mFormat.mReserved = 0; // Set up the source format mSourceFormat.mFormatID = 'SNDF'; mSourceFormat.mSampleRate = mFileInfo.samplerate; mSourceFormat.mChannelsPerFrame = (UInt32)mFileInfo.channels; switch(subFormat) { case SF_FORMAT_PCM_U8: mSourceFormat.mBitsPerChannel = 8; break; case SF_FORMAT_PCM_S8: mSourceFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger; mSourceFormat.mBitsPerChannel = 8; break; case SF_FORMAT_PCM_16: mSourceFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger; mSourceFormat.mBitsPerChannel = 16; break; case SF_FORMAT_PCM_24: mSourceFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger; mSourceFormat.mBitsPerChannel = 24; break; case SF_FORMAT_PCM_32: mSourceFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger; mSourceFormat.mBitsPerChannel = 32; break; case SF_FORMAT_FLOAT: mSourceFormat.mFormatFlags = kAudioFormatFlagIsFloat; mSourceFormat.mBitsPerChannel = 32; break; case SF_FORMAT_DOUBLE: mSourceFormat.mFormatFlags = kAudioFormatFlagIsFloat; mSourceFormat.mBitsPerChannel = 64; break; } return true; }