/**
 * Convert a CoreAudio channel layout into one that uses explicit channel
 * descriptions (kAudioChannelLayoutTag_UseChannelDescriptions).
 *
 * Takes ownership of *layout: it is returned unchanged when it already uses
 * channel descriptions; otherwise it is freed and a newly allocated,
 * converted layout is returned. Returns NULL on allocation failure (the
 * input layout is freed in that case as well). *size is set to the byte
 * size of the expanded layout by the property query.
 */
static AudioChannelLayout *ffat_convert_layout(AudioChannelLayout *layout, UInt32* size)
{
    AudioChannelLayout *converted;
    AudioChannelLayoutTag tag = layout->mChannelLayoutTag;
    int from_bitmap = (tag == kAudioChannelLayoutTag_UseChannelBitmap);

    /* Already in the desired representation — nothing to do. */
    if (tag == kAudioChannelLayoutTag_UseChannelDescriptions)
        return layout;

    /* Query the size of the expanded layout, keyed either on the channel
     * bitmap or on the layout tag. */
    if (from_bitmap)
        AudioFormatGetPropertyInfo(kAudioFormatProperty_ChannelLayoutForBitmap,
                                   sizeof(UInt32), &layout->mChannelBitmap, size);
    else
        AudioFormatGetPropertyInfo(kAudioFormatProperty_ChannelLayoutForTag,
                                   sizeof(AudioChannelLayoutTag), &tag, size);

    converted = av_malloc(*size);
    if (!converted) {
        av_free(layout);
        return NULL;
    }

    /* Fill in the per-channel descriptions. */
    if (from_bitmap)
        AudioFormatGetProperty(kAudioFormatProperty_ChannelLayoutForBitmap,
                               sizeof(UInt32), &layout->mChannelBitmap,
                               size, converted);
    else
        AudioFormatGetProperty(kAudioFormatProperty_ChannelLayoutForTag,
                               sizeof(AudioChannelLayoutTag), &tag,
                               size, converted);

    converted->mChannelLayoutTag = kAudioChannelLayoutTag_UseChannelDescriptions;
    av_free(layout);
    return converted;
}
// Rebuilds the mixing matrix for the given input/output channel layouts.
// map[in][out] = mix-level of input_channel[in] into output_channel[out].
// Sets m_isValid when a predefined matrix was obtained from CoreAudio;
// otherwise leaves m_isValid false so a matrix can be built manually later.
void CCoreAudioMixMap::Rebuild(AudioChannelLayout& inLayout, AudioChannelLayout& outLayout)
{
  free(m_pMap);
  m_pMap = NULL;

  m_inChannels  = CCoreAudioChannelLayout::GetChannelCountForLayout(inLayout);
  m_outChannels = CCoreAudioChannelLayout::GetChannelCountForLayout(outLayout);

  // Try to find a 'well-known' matrix
  const AudioChannelLayout* layouts[] = {&inLayout, &outLayout};
  UInt32 propSize = 0;
  // FIX: the query result and size were previously ignored, so a failed
  // query led to calloc(1, 0) and a pointless property fetch.
  OSStatus ret = AudioFormatGetPropertyInfo(kAudioFormatProperty_MatrixMixMap,
                                            sizeof(layouts), layouts, &propSize);
  if (ret != noErr || propSize == 0)
  {
    // No predefined mixmap was available. Going to have to build it manually
    CLog::Log(LOGDEBUG, "CCoreAudioMixMap::CreateMap: Unable to locate pre-defined mixing matrix");
    m_isValid = false;
    return;
  }

  m_pMap = (Float32*)calloc(1, propSize);
  if (!m_pMap)
  {
    // FIX: allocation failure was previously unchecked.
    m_isValid = false;
    return;
  }

  // Try and get a predefined mixmap
  ret = AudioFormatGetProperty(kAudioFormatProperty_MatrixMixMap,
                               sizeof(layouts), layouts, &propSize, m_pMap);
  if (!ret)
  {
    // Nothing else to do...a map already exists
    m_isValid = true;
    return;
  }

  // No predefined mixmap was available. Going to have to build it manually
  CLog::Log(LOGDEBUG, "CCoreAudioMixMap::CreateMap: Unable to locate pre-defined mixing matrix");
  m_isValid = false;
}
void PrintCompatibleChannelLayouts() { int plen = strlen(prefix); for (LayoutTag *tag1 = gLayoutTags; tag1->name != NULL; ++tag1) { AudioChannelLayout layout1 = { tag1->constant, 0, 0 }; printf("\t'%s' : (", tag1->name + plen); int printed = 0; for (LayoutTag *tag2 = gLayoutTags; tag2->name != NULL; ++tag2) { AudioChannelLayout layout2 = { tag2->constant, 0, 0 }; AudioChannelLayout *layouts[] = { &layout1, &layout2 }; UInt32 propertySize; OSStatus err = AudioFormatGetPropertyInfo(kAudioFormatProperty_ChannelMap, sizeof(layouts), layouts, &propertySize); if (err == noErr) { SInt32 *map = (SInt32 *)malloc(propertySize); err = AudioFormatGetProperty(kAudioFormatProperty_ChannelMap, sizeof(layouts), layouts, &propertySize, map); if (err == noErr) { if (printed++) printf(", "); printf("'%s'", tag2->name + plen); } } } printf("),\n"); } }
// Copies `layout` into m_pLayout, always producing a layout expressed with
// explicit ChannelDescriptions (expanding a bitmap or a well-known tag via
// CoreAudio as needed). Returns true on success.
bool CCoreAudioChannelLayout::CopyLayout(AudioChannelLayout& layout)
{
  // AudioChannelLayout already contains space for one AudioChannelDescription.
  enum { kVariableLengthArray_deprecated = 1 };

  free(m_pLayout);
  m_pLayout = NULL;

  // This method always produces a layout with a ChannelDescriptions structure
  OSStatus ret = 0;
  UInt32 channels = GetChannelCountForLayout(layout);
  UInt32 size = sizeof(AudioChannelLayout) +
                (channels - kVariableLengthArray_deprecated) * sizeof(AudioChannelDescription);

  if (layout.mChannelLayoutTag == kAudioChannelLayoutTag_UseChannelDescriptions)
  {
    // We can copy the whole layout
    m_pLayout = (AudioChannelLayout*)malloc(size);
    if (!m_pLayout)  // FIX: allocation failure was previously unchecked
      return false;
    memcpy(m_pLayout, &layout, size);
  }
  else if (layout.mChannelLayoutTag == kAudioChannelLayoutTag_UseChannelBitmap)
  {
    // Deconstruct the bitmap to get the layout
    UInt32 propSize = 0;
    AudioFormatGetPropertyInfo(kAudioFormatProperty_ChannelLayoutForBitmap,
                               sizeof(layout.mChannelBitmap), &layout.mChannelBitmap,
                               &propSize);
    // FIX: a failed/empty size query previously led to writing the tag into
    // a zero-size allocation (heap overflow).
    if (propSize == 0)
      return false;
    m_pLayout = (AudioChannelLayout*)malloc(propSize);
    if (!m_pLayout)
      return false;
    ret = AudioFormatGetProperty(kAudioFormatProperty_ChannelLayoutForBitmap,
                                 sizeof(layout.mChannelBitmap), &layout.mChannelBitmap,
                                 &propSize, m_pLayout);
    m_pLayout->mChannelLayoutTag = kAudioChannelLayoutTag_UseChannelDescriptions;
  }
  else
  {
    // Convert the known layout to a custom layout
    UInt32 propSize = 0;
    AudioFormatGetPropertyInfo(kAudioFormatProperty_ChannelLayoutForTag,
                               sizeof(layout.mChannelLayoutTag), &layout.mChannelLayoutTag,
                               &propSize);
    if (propSize == 0)
      return false;
    m_pLayout = (AudioChannelLayout*)malloc(propSize);
    if (!m_pLayout)
      return false;
    ret = AudioFormatGetProperty(kAudioFormatProperty_ChannelLayoutForTag,
                                 sizeof(layout.mChannelLayoutTag), &layout.mChannelLayoutTag,
                                 &propSize, m_pLayout);
    m_pLayout->mChannelLayoutTag = kAudioChannelLayoutTag_UseChannelDescriptions;
  }

  return (ret == noErr);
}
// Converts a channel layout into one using explicit channel descriptions.
// Returns l unchanged if it already uses descriptions; otherwise returns a
// newly talloc'd layout (owned by talloc_ctx), or NULL on error.
static AudioChannelLayout *ca_layout_to_custom_layout(struct ao *ao,
                                                      void *talloc_ctx,
                                                      AudioChannelLayout *l)
{
    AudioChannelLayoutTag tag = l->mChannelLayoutTag;
    AudioChannelLayout *r;
    OSStatus err;

    if (tag == kAudioChannelLayoutTag_UseChannelDescriptions)
        return l;

    if (tag == kAudioChannelLayoutTag_UseChannelBitmap) {
        uint32_t psize;
        err = AudioFormatGetPropertyInfo(
            kAudioFormatProperty_ChannelLayoutForBitmap,
            sizeof(uint32_t), &l->mChannelBitmap, &psize);
        CHECK_CA_ERROR("failed to convert channel bitmap to descriptions (info)");
        // FIX: allocate on talloc_ctx (the parameter was previously ignored)
        // so the converted layout is released with its owner, not leaked.
        r = talloc_size(talloc_ctx, psize);
        err = AudioFormatGetProperty(
            kAudioFormatProperty_ChannelLayoutForBitmap,
            sizeof(uint32_t), &l->mChannelBitmap, &psize, r);
        CHECK_CA_ERROR("failed to convert channel bitmap to descriptions (get)");
    } else {
        uint32_t psize;
        err = AudioFormatGetPropertyInfo(
            kAudioFormatProperty_ChannelLayoutForTag,
            sizeof(AudioChannelLayoutTag), &l->mChannelLayoutTag, &psize);
        // FIX: check the query result *before* allocating; the original
        // allocated first and leaked the buffer when the info call failed.
        CHECK_CA_ERROR("failed to convert channel tag to descriptions (info)");
        r = talloc_size(talloc_ctx, psize);
        err = AudioFormatGetProperty(
            kAudioFormatProperty_ChannelLayoutForTag,
            sizeof(AudioChannelLayoutTag), &l->mChannelLayoutTag, &psize, r);
        CHECK_CA_ERROR("failed to convert channel tag to descriptions (get)");
    }

    MP_VERBOSE(ao, "converted input channel layout:\n");
    ca_log_layout(ao, MSGL_V, l);

    return r;

coreaudio_error:
    return NULL;
}
// Reads the format list from inputFile and selects a decodable stream
// description into inputFormat. For layered (multi-format) files it picks
// the first format for which a system decoder exists; throws
// kAudioFileUnsupportedDataFormatError when nothing is playable.
void GetFormatFromInputFile (AudioFileID inputFile, CAStreamBasicDescription & inputFormat)
{
	bool doPrint = true;
	UInt32 size;
	XThrowIfError(AudioFileGetPropertyInfo(inputFile, kAudioFilePropertyFormatList, &size, NULL),
			"couldn't get file's format list info");
	UInt32 numFormats = size / sizeof(AudioFormatListItem);
	// NOTE(review): formatList (and decoderIDs below) still leak if one of
	// the XThrowIfError calls throws; an RAII buffer would fix that.
	AudioFormatListItem *formatList = new AudioFormatListItem [ numFormats ];

	XThrowIfError(AudioFileGetProperty(inputFile, kAudioFilePropertyFormatList, &size, formatList),
			"couldn't get file's data format");
	numFormats = size / sizeof(AudioFormatListItem); // we need to reassess the actual number of formats when we get it
	if (numFormats == 1) {
		// this is the common case
		inputFormat = formatList[0].mASBD;
	} else {
		if (doPrint) {
			printf ("File has a %d layered data format:\n", (int)numFormats);
			for (unsigned int i = 0; i < numFormats; ++i)
				CAStreamBasicDescription(formatList[i].mASBD).Print();
			printf("\n");
		}
		// now we should look to see which decoders we have on the system
		XThrowIfError(AudioFormatGetPropertyInfo(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size),
				"couldn't get decoder id's");
		UInt32 numDecoders = size / sizeof(OSType);
		OSType *decoderIDs = new OSType [ numDecoders ];
		XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size, decoderIDs),
				"couldn't get decoder id's");
		unsigned int i = 0;
		for (; i < numFormats; ++i) {
			OSType decoderID = formatList[i].mASBD.mFormatID;
			bool found = false;
			for (unsigned int j = 0; j < numDecoders; ++j) {
				if (decoderID == decoderIDs[j]) {
					found = true;
					break;
				}
			}
			if (found) break;
		}
		delete [] decoderIDs;

		if (i >= numFormats) {
			fprintf (stderr, "Cannot play any of the formats in this file\n");
			delete [] formatList;	// FIX: was leaked on this throw path
			throw kAudioFileUnsupportedDataFormatError;
		}
		inputFormat = formatList[i].mASBD;
	}
	delete [] formatList;
}
// Builds a catalog of every writable audio file type known to the system:
// for each type its display name, filename extensions, and the data formats
// it can contain (with readable/writable capability and per-format ASBD
// variants). On any global query failure, bails with whatever was gathered.
CAAudioFileFormats::CAAudioFileFormats() :
	mNumFileFormats(0), mFileFormats(NULL)
{
	OSStatus err;
	UInt32 size;
	UInt32 *fileTypes = NULL, *writableFormats = NULL, *readableFormats = NULL;
	int nWritableFormats, nReadableFormats;

	// get all file types
	err = AudioFileGetGlobalInfoSize(kAudioFileGlobalInfo_WritableTypes, 0, NULL, &size);
	if (err != noErr) goto bail;
	mNumFileFormats = size / sizeof(UInt32);
	mFileFormats = new FileFormatInfo[mNumFileFormats];
	fileTypes = new UInt32[mNumFileFormats];
	err = AudioFileGetGlobalInfo(kAudioFileGlobalInfo_WritableTypes, 0, NULL, &size, fileTypes);
	if (err != noErr) goto bail;

	// get all writable formats
	err = AudioFormatGetPropertyInfo(kAudioFormatProperty_EncodeFormatIDs, 0, NULL, &size);
	if (err != noErr) goto bail;
	nWritableFormats = size / sizeof(UInt32);
	writableFormats = new UInt32[nWritableFormats];
	err = AudioFormatGetProperty(kAudioFormatProperty_EncodeFormatIDs, 0, NULL, &size, writableFormats);
	if (err != noErr) goto bail;

	// get all readable formats
	err = AudioFormatGetPropertyInfo(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size);
	if (err != noErr) goto bail;
	nReadableFormats = size / sizeof(UInt32);
	readableFormats = new UInt32[nReadableFormats];
	err = AudioFormatGetProperty(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size, readableFormats);
	if (err != noErr) goto bail;

	// get info for each file type
	for (int i = 0; i < mNumFileFormats; ++i) {
		FileFormatInfo *ffi = &mFileFormats[i];
		OSType filetype = fileTypes[i];

		ffi->mFileTypeID = filetype;

		// file type name
		ffi->mFileTypeName = NULL;
		size = sizeof(CFStringRef);
		err = AudioFileGetGlobalInfo(kAudioFileGlobalInfo_FileTypeName, sizeof(UInt32), &filetype, &size, &ffi->mFileTypeName);
		// NOTE(review): kAudioFileGlobalInfo_FileTypeName should already hand
		// back a retained CFString; this extra CFRetain looks like an
		// over-retain — confirm against the matching release in the dtor.
		if (ffi->mFileTypeName)
			CFRetain(ffi->mFileTypeName);

		// file extensions
		size = sizeof(CFArrayRef);
		err = AudioFileGetGlobalInfo(kAudioFileGlobalInfo_ExtensionsForType, sizeof(OSType), &filetype, &size, &ffi->mExtensions);
		if (err)
			ffi->mExtensions = NULL;

		// file data formats
		ffi->mNumDataFormats = 0;
		ffi->mDataFormats = NULL;

		err = AudioFileGetGlobalInfoSize(kAudioFileGlobalInfo_AvailableFormatIDs, sizeof(UInt32), &filetype, &size);
		if (err == noErr) {
			ffi->mNumDataFormats = size / sizeof(OSType);
			OSType *formatIDs = new OSType[ffi->mNumDataFormats];
			err = AudioFileGetGlobalInfo(kAudioFileGlobalInfo_AvailableFormatIDs, sizeof(UInt32), &filetype, &size, formatIDs);
			if (err == noErr) {
				ffi->mDataFormats = new DataFormatInfo[ffi->mNumDataFormats];
				for (int j = 0; j < ffi->mNumDataFormats; ++j) {
					int k;
					bool anyBigEndian = false, anyLittleEndian = false;
					DataFormatInfo *dfi = &ffi->mDataFormats[j];
					dfi->mFormatID = formatIDs[j];
					// Linear PCM is always supported even if it is missing
					// from the codec lists.
					dfi->mReadable = (dfi->mFormatID == kAudioFormatLinearPCM);
					dfi->mWritable = (dfi->mFormatID == kAudioFormatLinearPCM);
					for (k = 0; k < nReadableFormats; ++k)
						if (readableFormats[k] == dfi->mFormatID) {
							dfi->mReadable = true;
							break;
						}
					for (k = 0; k < nWritableFormats; ++k)
						if (writableFormats[k] == dfi->mFormatID) {
							dfi->mWritable = true;
							break;
						}

					dfi->mNumVariants = 0;
					// FIX: mVariants was left uninitialized when the variant
					// size query below fails (wild pointer on later delete).
					dfi->mVariants = NULL;
					AudioFileTypeAndFormatID tf = { filetype, dfi->mFormatID };
					err = AudioFileGetGlobalInfoSize(kAudioFileGlobalInfo_AvailableStreamDescriptionsForFormat, sizeof(AudioFileTypeAndFormatID), &tf, &size);
					if (err == noErr) {
						dfi->mNumVariants = size / sizeof(AudioStreamBasicDescription);
						dfi->mVariants = new AudioStreamBasicDescription[dfi->mNumVariants];
						err = AudioFileGetGlobalInfo(kAudioFileGlobalInfo_AvailableStreamDescriptionsForFormat, sizeof(AudioFileTypeAndFormatID), &tf, &size, dfi->mVariants);
						if (err) {
							dfi->mNumVariants = 0;
							delete[] dfi->mVariants;
							dfi->mVariants = NULL;
						} else {
							// Detect PCM formats offered in both endiannesses.
							for (k = 0; k < dfi->mNumVariants; ++k) {
								AudioStreamBasicDescription *desc = &dfi->mVariants[k];
								if (desc->mBitsPerChannel > 8) {
									if (desc->mFormatFlags & kAudioFormatFlagIsBigEndian)
										anyBigEndian = true;
									else
										anyLittleEndian = true;
								}
							}
						}
					}

					dfi->mEitherEndianPCM = (anyBigEndian && anyLittleEndian);
				}
			}
			delete[] formatIDs;
		}
	}

	// sort file formats by name
	qsort(mFileFormats, mNumFileFormats, sizeof(FileFormatInfo), CompareFileFormatNames);
bail:
	delete[] fileTypes;
	delete[] readableFormats;
	delete[] writableFormats;
}
int main (int argc, const char * argv[]) { #if TARGET_OS_WIN32 InitializeQTML(0L); #endif const char *fpath = NULL; Float32 volume = 1; Float32 duration = -1; Float32 currentTime = 0.0; Float32 rate = 0; int rQuality = 0; bool doPrint = false; for (int i = 1; i < argc; ++i) { const char *arg = argv[i]; if (arg[0] != '-') { if (fpath != NULL) { fprintf(stderr, "may only specify one file to play\n"); usage(); } fpath = arg; } else { arg += 1; if (arg[0] == 'v' || !strcmp(arg, "-volume")) { if (++i == argc) MissingArgument(); arg = argv[i]; sscanf(arg, "%f", &volume); } else if (arg[0] == 't' || !strcmp(arg, "-time")) { if (++i == argc) MissingArgument(); arg = argv[i]; sscanf(arg, "%f", &duration); } else if (arg[0] == 'r' || !strcmp(arg, "-rate")) { if (++i == argc) MissingArgument(); arg = argv[i]; sscanf(arg, "%f", &rate); } else if (arg[0] == 'q' || !strcmp(arg, "-rQuality")) { if (++i == argc) MissingArgument(); arg = argv[i]; sscanf(arg, "%d", &rQuality); } else if (arg[0] == 'h' || !strcmp(arg, "-help")) { usage(); } else if (arg[0] == 'd' || !strcmp(arg, "-debug")) { doPrint = true; } else { fprintf(stderr, "unknown argument: %s\n\n", arg - 1); usage(); } } } if (fpath == NULL) usage(); if (doPrint) printf ("Playing file: %s\n", fpath); try { AQTestInfo myInfo; CFURLRef sndFile = CFURLCreateFromFileSystemRepresentation (NULL, (const UInt8 *)fpath, strlen(fpath), false); if (!sndFile) XThrowIfError (!sndFile, "can't parse file path"); OSStatus result = AudioFileOpenURL (sndFile, 0x1/*fsRdPerm*/, 0/*inFileTypeHint*/, &myInfo.mAudioFile); CFRelease (sndFile); XThrowIfError(result, "AudioFileOpen failed"); UInt32 size; XThrowIfError(AudioFileGetPropertyInfo(myInfo.mAudioFile, kAudioFilePropertyFormatList, &size, NULL), "couldn't get file's format list info"); UInt32 numFormats = size / sizeof(AudioFormatListItem); AudioFormatListItem *formatList = new AudioFormatListItem [ numFormats ]; XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, 
kAudioFilePropertyFormatList, &size, formatList), "couldn't get file's data format"); numFormats = size / sizeof(AudioFormatListItem); // we need to reassess the actual number of formats when we get it if (numFormats == 1) { // this is the common case myInfo.mDataFormat = formatList[0].mASBD; // see if there is a channel layout (multichannel file) result = AudioFileGetPropertyInfo(myInfo.mAudioFile, kAudioFilePropertyChannelLayout, &myInfo.mChannelLayoutSize, NULL); if (result == noErr && myInfo.mChannelLayoutSize > 0) { myInfo.mChannelLayout = (AudioChannelLayout *)new char [myInfo.mChannelLayoutSize]; XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, kAudioFilePropertyChannelLayout, &myInfo.mChannelLayoutSize, myInfo.mChannelLayout), "get audio file's channel layout"); } } else { if (doPrint) { printf ("File has a %d layered data format:\n", (int)numFormats); for (unsigned int i = 0; i < numFormats; ++i) CAStreamBasicDescription(formatList[i].mASBD).Print(); } // now we should look to see which decoders we have on the system XThrowIfError(AudioFormatGetPropertyInfo(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size), "couldn't get decoder id's"); UInt32 numDecoders = size / sizeof(OSType); OSType *decoderIDs = new OSType [ numDecoders ]; XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size, decoderIDs), "couldn't get decoder id's"); unsigned int i = 0; for (; i < numFormats; ++i) { OSType decoderID = formatList[i].mASBD.mFormatID; bool found = false; for (unsigned int j = 0; j < numDecoders; ++j) { if (decoderID == decoderIDs[j]) { found = true; break; } } if (found) break; } delete [] decoderIDs; if (i >= numFormats) { fprintf (stderr, "Cannot play any of the formats in this file\n"); throw kAudioFileUnsupportedDataFormatError; } myInfo.mDataFormat = formatList[i].mASBD; myInfo.mChannelLayoutSize = sizeof(AudioChannelLayout); myInfo.mChannelLayout = (AudioChannelLayout*)new char [myInfo.mChannelLayoutSize]; 
myInfo.mChannelLayout->mChannelLayoutTag = formatList[i].mChannelLayoutTag; myInfo.mChannelLayout->mChannelBitmap = 0; myInfo.mChannelLayout->mNumberChannelDescriptions = 0; } delete [] formatList; if (doPrint) { printf ("Playing format: "); myInfo.mDataFormat.Print(); } XThrowIfError(AudioQueueNewOutput(&myInfo.mDataFormat, AQTestBufferCallback, &myInfo, CFRunLoopGetCurrent(), kCFRunLoopCommonModes, 0, &myInfo.mQueue), "AudioQueueNew failed"); UInt32 bufferByteSize; // we need to calculate how many packets we read at a time, and how big a buffer we need // we base this on the size of the packets in the file and an approximate duration for each buffer { bool isFormatVBR = (myInfo.mDataFormat.mBytesPerPacket == 0 || myInfo.mDataFormat.mFramesPerPacket == 0); // first check to see what the max size of a packet is - if it is bigger // than our allocation default size, that needs to become larger UInt32 maxPacketSize; size = sizeof(maxPacketSize); XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, kAudioFilePropertyPacketSizeUpperBound, &size, &maxPacketSize), "couldn't get file's max packet size"); // adjust buffer size to represent about a half second of audio based on this format CalculateBytesForTime (myInfo.mDataFormat, maxPacketSize, 0.5/*seconds*/, &bufferByteSize, &myInfo.mNumPacketsToRead); if (isFormatVBR) myInfo.mPacketDescs = new AudioStreamPacketDescription [myInfo.mNumPacketsToRead]; else myInfo.mPacketDescs = NULL; // we don't provide packet descriptions for constant bit rate formats (like linear PCM) if (doPrint) printf ("Buffer Byte Size: %d, Num Packets to Read: %d\n", (int)bufferByteSize, (int)myInfo.mNumPacketsToRead); } // (2) If the file has a cookie, we should get it and set it on the AQ size = sizeof(UInt32); result = AudioFileGetPropertyInfo (myInfo.mAudioFile, kAudioFilePropertyMagicCookieData, &size, NULL); if (!result && size) { char* cookie = new char [size]; XThrowIfError (AudioFileGetProperty (myInfo.mAudioFile, 
kAudioFilePropertyMagicCookieData, &size, cookie), "get cookie from file"); XThrowIfError (AudioQueueSetProperty(myInfo.mQueue, kAudioQueueProperty_MagicCookie, cookie, size), "set cookie on queue"); delete [] cookie; } // set ACL if there is one if (myInfo.mChannelLayout) XThrowIfError(AudioQueueSetProperty(myInfo.mQueue, kAudioQueueProperty_ChannelLayout, myInfo.mChannelLayout, myInfo.mChannelLayoutSize), "set channel layout on queue"); // prime the queue with some data before starting myInfo.mDone = false; myInfo.mCurrentPacket = 0; for (int i = 0; i < kNumberBuffers; ++i) { XThrowIfError(AudioQueueAllocateBuffer(myInfo.mQueue, bufferByteSize, &myInfo.mBuffers[i]), "AudioQueueAllocateBuffer failed"); AQTestBufferCallback (&myInfo, myInfo.mQueue, myInfo.mBuffers[i]); if (myInfo.mDone) break; } // set the volume of the queue XThrowIfError (AudioQueueSetParameter(myInfo.mQueue, kAudioQueueParam_Volume, volume), "set queue volume"); XThrowIfError (AudioQueueAddPropertyListener (myInfo.mQueue, kAudioQueueProperty_IsRunning, MyAudioQueuePropertyListenerProc, NULL), "add listener"); #if !TARGET_OS_IPHONE if (rate > 0) { UInt32 propValue = 1; XThrowIfError (AudioQueueSetProperty (myInfo.mQueue, kAudioQueueProperty_EnableTimePitch, &propValue, sizeof(propValue)), "enable time pitch"); propValue = rQuality ? kAudioQueueTimePitchAlgorithm_Spectral : kAudioQueueTimePitchAlgorithm_TimeDomain; XThrowIfError (AudioQueueSetProperty (myInfo.mQueue, kAudioQueueProperty_TimePitchAlgorithm, &propValue, sizeof(propValue)), "time pitch algorithm"); propValue = (rate == 1.0f ? 1 : 0); // bypass rate if 1.0 XThrowIfError (AudioQueueSetProperty (myInfo.mQueue, kAudioQueueProperty_TimePitchBypass, &propValue, sizeof(propValue)), "bypass time pitch"); if (rate != 1) { XThrowIfError (AudioQueueSetParameter (myInfo.mQueue, kAudioQueueParam_PlayRate, rate), "set playback rate"); } if (doPrint) { printf ("Enable rate-scaled playback (rate = %.2f) using %s algorithm\n", rate, (rQuality ? 
"Spectral": "Time Domain")); } } #endif // lets start playing now - stop is called in the AQTestBufferCallback when there's // no more to read from the file XThrowIfError(AudioQueueStart(myInfo.mQueue, NULL), "AudioQueueStart failed"); do { CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.25, false); currentTime += .25; if (duration > 0 && currentTime >= duration) break; } while (gIsRunning); CFRunLoopRunInMode(kCFRunLoopDefaultMode, 1, false); XThrowIfError(AudioQueueDispose(myInfo.mQueue, true), "AudioQueueDispose(true) failed"); XThrowIfError(AudioFileClose(myInfo.mAudioFile), "AudioQueueDispose(false) failed"); } catch (CAXException e) { char buf[256]; fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf)); } catch (...) { fprintf(stderr, "Unspecified exception\n"); } return 0; }
// Derives the decoder's input AudioStreamBasicDescription from the codec
// magic cookie (aExtraData). Falls back to the default format obtained via
// kAudioFormatProperty_FormatInfo whenever the richer format-list queries
// fail — hence the NS_OK returns on those paths.
nsresult
AppleATDecoder::GetInputAudioDescription(AudioStreamBasicDescription& aDesc,
                                         const nsTArray<uint8_t>& aExtraData)
{
  // Request the properties from CoreAudio using the codec magic cookie
  AudioFormatInfo formatInfo;
  PodZero(&formatInfo.mASBD);
  formatInfo.mASBD.mFormatID = mFormatID;
  if (mFormatID == kAudioFormatMPEG4AAC) {
    formatInfo.mASBD.mFormatFlags = mConfig.extended_profile;
  }
  formatInfo.mMagicCookieSize = aExtraData.Length();
  formatInfo.mMagicCookie = aExtraData.Elements();

  UInt32 formatListSize;
  // Attempt to retrieve the default format using
  // kAudioFormatProperty_FormatInfo method.
  // This method only retrieves the FramesPerPacket information required
  // by the decoder, which depends on the codec type and profile.
  aDesc.mFormatID = mFormatID;
  aDesc.mChannelsPerFrame = mConfig.channel_count;
  aDesc.mSampleRate = mConfig.samples_per_second;
  UInt32 inputFormatSize = sizeof(aDesc);
  OSStatus rv = AudioFormatGetProperty(kAudioFormatProperty_FormatInfo,
                                       0,
                                       NULL,
                                       &inputFormatSize,
                                       &aDesc);
  if (NS_WARN_IF(rv)) {
    return NS_ERROR_FAILURE;
  }

  // If any of the methods below fail, we will return the default format as
  // created using kAudioFormatProperty_FormatInfo above.
  rv = AudioFormatGetPropertyInfo(kAudioFormatProperty_FormatList,
                                  sizeof(formatInfo),
                                  &formatInfo,
                                  &formatListSize);
  if (rv || (formatListSize % sizeof(AudioFormatListItem))) {
    return NS_OK;
  }
  size_t listCount = formatListSize / sizeof(AudioFormatListItem);
  nsAutoArrayPtr<AudioFormatListItem> formatList(
    new AudioFormatListItem[listCount]);

  rv = AudioFormatGetProperty(kAudioFormatProperty_FormatList,
                              sizeof(formatInfo),
                              &formatInfo,
                              &formatListSize,
                              formatList);
  if (rv) {
    return NS_OK;
  }
  // FIX: the division yields size_t, but %u expects unsigned int — that is
  // undefined behavior on LP64. Cast explicitly (the newer copy of this
  // method uses %zu).
  LOG("found %u available audio stream(s)",
      static_cast<unsigned int>(formatListSize / sizeof(AudioFormatListItem)));

  // Get the index number of the first playable format.
  // This index number will be for the highest quality layer the platform
  // is capable of playing.
  UInt32 itemIndex;
  UInt32 indexSize = sizeof(itemIndex);
  rv = AudioFormatGetProperty(kAudioFormatProperty_FirstPlayableFormatFromList,
                              formatListSize,
                              formatList,
                              &indexSize,
                              &itemIndex);
  if (rv) {
    return NS_OK;
  }

  aDesc = formatList[itemIndex].mASBD;

  return NS_OK;
}
void CAAudioFileFormats::FileFormatInfo::LoadDataFormats() { if (mDataFormats != NULL) return; UInt32 *writableFormats = NULL, *readableFormats = NULL; int nWritableFormats, nReadableFormats; // get all writable formats UInt32 size; OSStatus err = AudioFormatGetPropertyInfo(kAudioFormatProperty_EncodeFormatIDs, 0, NULL, &size); if (err != noErr) goto bail; nWritableFormats = size / sizeof(UInt32); writableFormats = new UInt32[nWritableFormats]; err = AudioFormatGetProperty(kAudioFormatProperty_EncodeFormatIDs, 0, NULL, &size, writableFormats); if (err != noErr) goto bail; // get all readable formats err = AudioFormatGetPropertyInfo(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size); if (err != noErr) goto bail; nReadableFormats = size / sizeof(UInt32); readableFormats = new UInt32[nReadableFormats]; err = AudioFormatGetProperty(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size, readableFormats); if (err != noErr) goto bail; err = AudioFileGetGlobalInfoSize(kAudioFileGlobalInfo_AvailableFormatIDs, sizeof(UInt32), &mFileTypeID, &size); if (err == noErr) { mNumDataFormats = size / sizeof(OSType); OSType *formatIDs = new OSType[mNumDataFormats]; err = AudioFileGetGlobalInfo(kAudioFileGlobalInfo_AvailableFormatIDs, sizeof(UInt32), &mFileTypeID, &size, formatIDs); if (err == noErr) { mDataFormats = new DataFormatInfo[mNumDataFormats]; for (int j = 0; j < mNumDataFormats; ++j) { int k; bool anyBigEndian = false, anyLittleEndian = false; DataFormatInfo *dfi = &mDataFormats[j]; dfi->mFormatID = formatIDs[j]; dfi->mReadable = (dfi->mFormatID == kAudioFormatLinearPCM); dfi->mWritable = (dfi->mFormatID == kAudioFormatLinearPCM); for (k = 0; k < nReadableFormats; ++k) if (readableFormats[k] == dfi->mFormatID) { dfi->mReadable = true; break; } for (k = 0; k < nWritableFormats; ++k) if (writableFormats[k] == dfi->mFormatID) { dfi->mWritable = true; break; } dfi->mNumVariants = 0; AudioFileTypeAndFormatID tf = { mFileTypeID, dfi->mFormatID }; err = 
AudioFileGetGlobalInfoSize(kAudioFileGlobalInfo_AvailableStreamDescriptionsForFormat, sizeof(AudioFileTypeAndFormatID), &tf, &size); if (err == noErr) { dfi->mNumVariants = size / sizeof(AudioStreamBasicDescription); dfi->mVariants = new AudioStreamBasicDescription[dfi->mNumVariants]; err = AudioFileGetGlobalInfo(kAudioFileGlobalInfo_AvailableStreamDescriptionsForFormat, sizeof(AudioFileTypeAndFormatID), &tf, &size, dfi->mVariants); if (err) { dfi->mNumVariants = 0; delete[] dfi->mVariants; dfi->mVariants = NULL; } else { for (k = 0; k < dfi->mNumVariants; ++k) { AudioStreamBasicDescription *desc = &dfi->mVariants[k]; if (desc->mBitsPerChannel > 8) { if (desc->mFormatFlags & kAudioFormatFlagIsBigEndian) anyBigEndian = true; else anyLittleEndian = true; } } } } dfi->mEitherEndianPCM = (anyBigEndian && anyLittleEndian); } } delete[] formatIDs; } bail: delete[] readableFormats; delete[] writableFormats; }
// Will set mChannelLayout if a channel layout could properly be identified // and is supported. nsresult AppleATDecoder::SetupChannelLayout() { MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn()); // Determine the channel layout. UInt32 propertySize; UInt32 size; OSStatus status = AudioConverterGetPropertyInfo(mConverter, kAudioConverterOutputChannelLayout, &propertySize, NULL); if (status || !propertySize) { LOG("Couldn't get channel layout property (%s)", FourCC2Str(status)); return NS_ERROR_FAILURE; } auto data = MakeUnique<uint8_t[]>(propertySize); size = propertySize; status = AudioConverterGetProperty(mConverter, kAudioConverterInputChannelLayout, &size, data.get()); if (status || size != propertySize) { LOG("Couldn't get channel layout property (%s)", FourCC2Str(status)); return NS_ERROR_FAILURE; } AudioChannelLayout* layout = reinterpret_cast<AudioChannelLayout*>(data.get()); AudioChannelLayoutTag tag = layout->mChannelLayoutTag; // if tag is kAudioChannelLayoutTag_UseChannelDescriptions then the structure // directly contains the the channel layout mapping. // If tag is kAudioChannelLayoutTag_UseChannelBitmap then the layout will // be defined via the bitmap and can be retrieved using // kAudioFormatProperty_ChannelLayoutForBitmap property. // Otherwise the tag itself describes the layout. if (tag != kAudioChannelLayoutTag_UseChannelDescriptions) { AudioFormatPropertyID property = tag == kAudioChannelLayoutTag_UseChannelBitmap ? 
kAudioFormatProperty_ChannelLayoutForBitmap : kAudioFormatProperty_ChannelLayoutForTag; if (property == kAudioFormatProperty_ChannelLayoutForBitmap) { status = AudioFormatGetPropertyInfo(property, sizeof(UInt32), &layout->mChannelBitmap, &propertySize); } else { status = AudioFormatGetPropertyInfo(property, sizeof(AudioChannelLayoutTag), &tag, &propertySize); } if (status || !propertySize) { LOG("Couldn't get channel layout property info (%s:%s)", FourCC2Str(property), FourCC2Str(status)); return NS_ERROR_FAILURE; } data = MakeUnique<uint8_t[]>(propertySize); layout = reinterpret_cast<AudioChannelLayout*>(data.get()); size = propertySize; if (property == kAudioFormatProperty_ChannelLayoutForBitmap) { status = AudioFormatGetProperty(property, sizeof(UInt32), &layout->mChannelBitmap, &size, layout); } else { status = AudioFormatGetProperty(property, sizeof(AudioChannelLayoutTag), &tag, &size, layout); } if (status || size != propertySize) { LOG("Couldn't get channel layout property (%s:%s)", FourCC2Str(property), FourCC2Str(status)); return NS_ERROR_FAILURE; } // We have retrieved the channel layout from the tag or bitmap. // We can now directly use the channel descriptions. layout->mChannelLayoutTag = kAudioChannelLayoutTag_UseChannelDescriptions; } if (layout->mNumberChannelDescriptions > MAX_AUDIO_CHANNELS || layout->mNumberChannelDescriptions != mOutputFormat.mChannelsPerFrame) { LOG("Nonsensical channel layout or not matching the original channel number"); return NS_ERROR_FAILURE; } AudioConfig::Channel channels[MAX_AUDIO_CHANNELS]; for (uint32_t i = 0; i < layout->mNumberChannelDescriptions; i++) { AudioChannelLabel id = layout->mChannelDescriptions[i].mChannelLabel; AudioConfig::Channel channel = ConvertChannelLabel(id); channels[i] = channel; } mChannelLayout = MakeUnique<AudioConfig::ChannelLayout>(mOutputFormat.mChannelsPerFrame, channels); return NS_OK; }
// Builds the decoder's input AudioStreamBasicDescription from the codec
// magic cookie (aExtraData). If the richer format-list queries fail, the
// default description obtained via kAudioFormatProperty_FormatInfo is kept
// and NS_OK is returned.
MediaResult
AppleATDecoder::GetInputAudioDescription(AudioStreamBasicDescription& aDesc,
                                         const nsTArray<uint8_t>& aExtraData)
{
  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());

  // Seed the description with what the container told us, then let CoreAudio
  // fill in the rest (notably mFramesPerPacket, which depends on the codec
  // type and profile) via kAudioFormatProperty_FormatInfo.
  aDesc.mFormatID = mFormatID;
  aDesc.mChannelsPerFrame = mConfig.mChannels;
  aDesc.mSampleRate = mConfig.mRate;
  UInt32 descSize = sizeof(aDesc);
  OSStatus status = AudioFormatGetProperty(kAudioFormatProperty_FormatInfo,
                                           0,
                                           NULL,
                                           &descSize,
                                           &aDesc);
  if (NS_WARN_IF(status)) {
    return MediaResult(
      NS_ERROR_FAILURE,
      RESULT_DETAIL("Unable to get format info:%d", int32_t(status)));
  }

  // Ask CoreAudio for the format list derived from the magic cookie. Every
  // failure below is non-fatal: we simply keep the default format from above.
  AudioFormatInfo cookieInfo;
  PodZero(&cookieInfo.mASBD);
  cookieInfo.mASBD.mFormatID = mFormatID;
  if (mFormatID == kAudioFormatMPEG4AAC) {
    cookieInfo.mASBD.mFormatFlags = mConfig.mExtendedProfile;
  }
  cookieInfo.mMagicCookieSize = aExtraData.Length();
  cookieInfo.mMagicCookie = aExtraData.Elements();

  UInt32 listByteSize;
  status = AudioFormatGetPropertyInfo(kAudioFormatProperty_FormatList,
                                      sizeof(cookieInfo),
                                      &cookieInfo,
                                      &listByteSize);
  if (status || (listByteSize % sizeof(AudioFormatListItem))) {
    return NS_OK;
  }

  const size_t itemCount = listByteSize / sizeof(AudioFormatListItem);
  auto items = MakeUnique<AudioFormatListItem[]>(itemCount);
  status = AudioFormatGetProperty(kAudioFormatProperty_FormatList,
                                  sizeof(cookieInfo),
                                  &cookieInfo,
                                  &listByteSize,
                                  items.get());
  if (status) {
    return NS_OK;
  }
  LOG("found %zu available audio stream(s)",
      listByteSize / sizeof(AudioFormatListItem));

  // Pick the first playable entry — the highest quality layer this platform
  // is capable of decoding.
  UInt32 playableIndex;
  UInt32 playableIndexSize = sizeof(playableIndex);
  status =
    AudioFormatGetProperty(kAudioFormatProperty_FirstPlayableFormatFromList,
                           listByteSize,
                           items.get(),
                           &playableIndexSize,
                           &playableIndex);
  if (status) {
    return NS_OK;
  }

  aDesc = items[playableIndex].mASBD;
  return NS_OK;
}