// Determine the stream format to decode from an audio file.
// For single-format files this is just the file's ASBD; for layered
// (multi-format) files, pick the first layer for which the system has
// an installed decoder.
//
// @param inputFile    open audio file to inspect
// @param inputFormat  out: the chosen stream description
// @throws CAXException (via XThrowIfError) on property failures,
//         kAudioFileUnsupportedDataFormatError if no layer is decodable.
void GetFormatFromInputFile (AudioFileID inputFile, CAStreamBasicDescription & inputFormat)
{
    bool doPrint = true;
    UInt32 size;
    XThrowIfError(AudioFileGetPropertyInfo(inputFile, kAudioFilePropertyFormatList, &size, NULL),
                  "couldn't get file's format list info");
    UInt32 numFormats = size / sizeof(AudioFormatListItem);
    AudioFormatListItem *formatList = new AudioFormatListItem [ numFormats ];
    OSType *decoderIDs = NULL;  // allocated only on the multi-layer path

    // FIX: the original leaked formatList (and decoderIDs) whenever
    // XThrowIfError or the explicit throw below fired. Release both
    // before rethrowing.
    try {
        XThrowIfError(AudioFileGetProperty(inputFile, kAudioFilePropertyFormatList, &size, formatList),
                      "couldn't get file's data format");
        numFormats = size / sizeof(AudioFormatListItem); // we need to reassess the actual number of formats when we get it

        if (numFormats == 1) {
            // this is the common case
            inputFormat = formatList[0].mASBD;
        } else {
            if (doPrint) {
                printf ("File has a %d layered data format:\n", (int)numFormats);
                for (unsigned int i = 0; i < numFormats; ++i)
                    CAStreamBasicDescription(formatList[i].mASBD).Print();
                printf("\n");
            }
            // now we should look to see which decoders we have on the system
            XThrowIfError(AudioFormatGetPropertyInfo(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size),
                          "couldn't get decoder id's");
            UInt32 numDecoders = size / sizeof(OSType);
            decoderIDs = new OSType [ numDecoders ];
            XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size, decoderIDs),
                          "couldn't get decoder id's");

            // find the first layer whose format ID matches an installed decoder
            unsigned int i = 0;
            for (; i < numFormats; ++i) {
                OSType decoderID = formatList[i].mASBD.mFormatID;
                bool found = false;
                for (unsigned int j = 0; j < numDecoders; ++j) {
                    if (decoderID == decoderIDs[j]) {
                        found = true;
                        break;
                    }
                }
                if (found) break;
            }
            delete [] decoderIDs;
            decoderIDs = NULL;

            if (i >= numFormats) {
                fprintf (stderr, "Cannot play any of the formats in this file\n");
                throw kAudioFileUnsupportedDataFormatError;
            }
            inputFormat = formatList[i].mASBD;
        }
    } catch (...) {
        delete [] decoderIDs;
        delete [] formatList;
        throw;
    }
    delete [] formatList;
}
// soundsource overrides Result SoundSourceCoreAudio::tryOpen(const AudioSourceConfig& audioSrcCfg) { const QString fileName(getLocalFileName()); //Open the audio file. OSStatus err; /** This code blocks works with OS X 10.5+ only. DO NOT DELETE IT for now. */ CFStringRef urlStr = CFStringCreateWithCharacters(0, reinterpret_cast<const UniChar *>(fileName.unicode()), fileName.size()); CFURLRef urlRef = CFURLCreateWithFileSystemPath(NULL, urlStr, kCFURLPOSIXPathStyle, false); err = ExtAudioFileOpenURL(urlRef, &m_audioFile); CFRelease(urlStr); CFRelease(urlRef); /** TODO: Use FSRef for compatibility with 10.4 Tiger. Note that ExtAudioFileOpen() is deprecated above Tiger, so we must maintain both code paths if someone finishes this part of the code. FSRef fsRef; CFURLGetFSRef(reinterpret_cast<CFURLRef>(url.get()), &fsRef); err = ExtAudioFileOpen(&fsRef, &m_audioFile); */ if (err != noErr) { qDebug() << "SSCA: Error opening file " << fileName; return ERR; } // get the input file format UInt32 inputFormatSize = sizeof(m_inputFormat); err = ExtAudioFileGetProperty(m_audioFile, kExtAudioFileProperty_FileDataFormat, &inputFormatSize, &m_inputFormat); if (err != noErr) { qDebug() << "SSCA: Error getting file format (" << fileName << ")"; return ERR; } m_bFileIsMp3 = m_inputFormat.mFormatID == kAudioFormatMPEGLayer3; // create the output format const UInt32 numChannels = (kChannelCountZero < audioSrcCfg.channelCountHint) ? 
audioSrcCfg.channelCountHint : 2; m_outputFormat = CAStreamBasicDescription(m_inputFormat.mSampleRate, numChannels, CAStreamBasicDescription::kPCMFormatFloat32, true); // set the client format err = ExtAudioFileSetProperty(m_audioFile, kExtAudioFileProperty_ClientDataFormat, sizeof(m_outputFormat), &m_outputFormat); if (err != noErr) { qDebug() << "SSCA: Error setting file property"; return ERR; } //get the total length in frames of the audio file - copypasta: http://discussions.apple.com/thread.jspa?threadID=2364583&tstart=47 SInt64 totalFrameCount; UInt32 totalFrameCountSize = sizeof(totalFrameCount); err = ExtAudioFileGetProperty(m_audioFile, kExtAudioFileProperty_FileLengthFrames, &totalFrameCountSize, &totalFrameCount); if (err != noErr) { qDebug() << "SSCA: Error getting number of frames"; return ERR; } // // WORKAROUND for bug in ExtFileAudio // AudioConverterRef acRef; UInt32 acrsize = sizeof(AudioConverterRef); err = ExtAudioFileGetProperty(m_audioFile, kExtAudioFileProperty_AudioConverter, &acrsize, &acRef); //_ThrowExceptionIfErr(@"kExtAudioFileProperty_AudioConverter", err); AudioConverterPrimeInfo primeInfo; UInt32 piSize = sizeof(AudioConverterPrimeInfo); memset(&primeInfo, 0, piSize); err = AudioConverterGetProperty(acRef, kAudioConverterPrimeInfo, &piSize, &primeInfo); if (err != kAudioConverterErr_PropertyNotSupported) { // Only if decompressing //_ThrowExceptionIfErr(@"kAudioConverterPrimeInfo", err); m_headerFrames = primeInfo.leadingFrames; } else { m_headerFrames = 0; } setChannelCount(m_outputFormat.NumberChannels()); setFrameRate(m_inputFormat.mSampleRate); // NOTE(uklotzde): This is what I found when migrating // the code from SoundSource (sample-oriented) to the new // AudioSource (frame-oriented) API. It is not documented // when m_headerFrames > 0 and what the consequences are. setFrameCount(totalFrameCount/* - m_headerFrames*/); //Seek to position 0, which forces us to skip over all the header frames. 
//This makes sure we're ready to just let the Analyser rip and it'll //get the number of samples it expects (ie. no header frames). seekSampleFrame(0); return OK; }
int main (int argc, const char * argv[]) { #if TARGET_OS_WIN32 InitializeQTML(0L); #endif const char *fpath = NULL; Float32 volume = 1; Float32 duration = -1; Float32 currentTime = 0.0; Float32 rate = 0; int rQuality = 0; bool doPrint = false; for (int i = 1; i < argc; ++i) { const char *arg = argv[i]; if (arg[0] != '-') { if (fpath != NULL) { fprintf(stderr, "may only specify one file to play\n"); usage(); } fpath = arg; } else { arg += 1; if (arg[0] == 'v' || !strcmp(arg, "-volume")) { if (++i == argc) MissingArgument(); arg = argv[i]; sscanf(arg, "%f", &volume); } else if (arg[0] == 't' || !strcmp(arg, "-time")) { if (++i == argc) MissingArgument(); arg = argv[i]; sscanf(arg, "%f", &duration); } else if (arg[0] == 'r' || !strcmp(arg, "-rate")) { if (++i == argc) MissingArgument(); arg = argv[i]; sscanf(arg, "%f", &rate); } else if (arg[0] == 'q' || !strcmp(arg, "-rQuality")) { if (++i == argc) MissingArgument(); arg = argv[i]; sscanf(arg, "%d", &rQuality); } else if (arg[0] == 'h' || !strcmp(arg, "-help")) { usage(); } else if (arg[0] == 'd' || !strcmp(arg, "-debug")) { doPrint = true; } else { fprintf(stderr, "unknown argument: %s\n\n", arg - 1); usage(); } } } if (fpath == NULL) usage(); if (doPrint) printf ("Playing file: %s\n", fpath); try { AQTestInfo myInfo; CFURLRef sndFile = CFURLCreateFromFileSystemRepresentation (NULL, (const UInt8 *)fpath, strlen(fpath), false); if (!sndFile) XThrowIfError (!sndFile, "can't parse file path"); OSStatus result = AudioFileOpenURL (sndFile, 0x1/*fsRdPerm*/, 0/*inFileTypeHint*/, &myInfo.mAudioFile); CFRelease (sndFile); XThrowIfError(result, "AudioFileOpen failed"); UInt32 size; XThrowIfError(AudioFileGetPropertyInfo(myInfo.mAudioFile, kAudioFilePropertyFormatList, &size, NULL), "couldn't get file's format list info"); UInt32 numFormats = size / sizeof(AudioFormatListItem); AudioFormatListItem *formatList = new AudioFormatListItem [ numFormats ]; XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, 
kAudioFilePropertyFormatList, &size, formatList), "couldn't get file's data format"); numFormats = size / sizeof(AudioFormatListItem); // we need to reassess the actual number of formats when we get it if (numFormats == 1) { // this is the common case myInfo.mDataFormat = formatList[0].mASBD; // see if there is a channel layout (multichannel file) result = AudioFileGetPropertyInfo(myInfo.mAudioFile, kAudioFilePropertyChannelLayout, &myInfo.mChannelLayoutSize, NULL); if (result == noErr && myInfo.mChannelLayoutSize > 0) { myInfo.mChannelLayout = (AudioChannelLayout *)new char [myInfo.mChannelLayoutSize]; XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, kAudioFilePropertyChannelLayout, &myInfo.mChannelLayoutSize, myInfo.mChannelLayout), "get audio file's channel layout"); } } else { if (doPrint) { printf ("File has a %d layered data format:\n", (int)numFormats); for (unsigned int i = 0; i < numFormats; ++i) CAStreamBasicDescription(formatList[i].mASBD).Print(); } // now we should look to see which decoders we have on the system XThrowIfError(AudioFormatGetPropertyInfo(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size), "couldn't get decoder id's"); UInt32 numDecoders = size / sizeof(OSType); OSType *decoderIDs = new OSType [ numDecoders ]; XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size, decoderIDs), "couldn't get decoder id's"); unsigned int i = 0; for (; i < numFormats; ++i) { OSType decoderID = formatList[i].mASBD.mFormatID; bool found = false; for (unsigned int j = 0; j < numDecoders; ++j) { if (decoderID == decoderIDs[j]) { found = true; break; } } if (found) break; } delete [] decoderIDs; if (i >= numFormats) { fprintf (stderr, "Cannot play any of the formats in this file\n"); throw kAudioFileUnsupportedDataFormatError; } myInfo.mDataFormat = formatList[i].mASBD; myInfo.mChannelLayoutSize = sizeof(AudioChannelLayout); myInfo.mChannelLayout = (AudioChannelLayout*)new char [myInfo.mChannelLayoutSize]; 
myInfo.mChannelLayout->mChannelLayoutTag = formatList[i].mChannelLayoutTag; myInfo.mChannelLayout->mChannelBitmap = 0; myInfo.mChannelLayout->mNumberChannelDescriptions = 0; } delete [] formatList; if (doPrint) { printf ("Playing format: "); myInfo.mDataFormat.Print(); } XThrowIfError(AudioQueueNewOutput(&myInfo.mDataFormat, AQTestBufferCallback, &myInfo, CFRunLoopGetCurrent(), kCFRunLoopCommonModes, 0, &myInfo.mQueue), "AudioQueueNew failed"); UInt32 bufferByteSize; // we need to calculate how many packets we read at a time, and how big a buffer we need // we base this on the size of the packets in the file and an approximate duration for each buffer { bool isFormatVBR = (myInfo.mDataFormat.mBytesPerPacket == 0 || myInfo.mDataFormat.mFramesPerPacket == 0); // first check to see what the max size of a packet is - if it is bigger // than our allocation default size, that needs to become larger UInt32 maxPacketSize; size = sizeof(maxPacketSize); XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, kAudioFilePropertyPacketSizeUpperBound, &size, &maxPacketSize), "couldn't get file's max packet size"); // adjust buffer size to represent about a half second of audio based on this format CalculateBytesForTime (myInfo.mDataFormat, maxPacketSize, 0.5/*seconds*/, &bufferByteSize, &myInfo.mNumPacketsToRead); if (isFormatVBR) myInfo.mPacketDescs = new AudioStreamPacketDescription [myInfo.mNumPacketsToRead]; else myInfo.mPacketDescs = NULL; // we don't provide packet descriptions for constant bit rate formats (like linear PCM) if (doPrint) printf ("Buffer Byte Size: %d, Num Packets to Read: %d\n", (int)bufferByteSize, (int)myInfo.mNumPacketsToRead); } // (2) If the file has a cookie, we should get it and set it on the AQ size = sizeof(UInt32); result = AudioFileGetPropertyInfo (myInfo.mAudioFile, kAudioFilePropertyMagicCookieData, &size, NULL); if (!result && size) { char* cookie = new char [size]; XThrowIfError (AudioFileGetProperty (myInfo.mAudioFile, 
kAudioFilePropertyMagicCookieData, &size, cookie), "get cookie from file"); XThrowIfError (AudioQueueSetProperty(myInfo.mQueue, kAudioQueueProperty_MagicCookie, cookie, size), "set cookie on queue"); delete [] cookie; } // set ACL if there is one if (myInfo.mChannelLayout) XThrowIfError(AudioQueueSetProperty(myInfo.mQueue, kAudioQueueProperty_ChannelLayout, myInfo.mChannelLayout, myInfo.mChannelLayoutSize), "set channel layout on queue"); // prime the queue with some data before starting myInfo.mDone = false; myInfo.mCurrentPacket = 0; for (int i = 0; i < kNumberBuffers; ++i) { XThrowIfError(AudioQueueAllocateBuffer(myInfo.mQueue, bufferByteSize, &myInfo.mBuffers[i]), "AudioQueueAllocateBuffer failed"); AQTestBufferCallback (&myInfo, myInfo.mQueue, myInfo.mBuffers[i]); if (myInfo.mDone) break; } // set the volume of the queue XThrowIfError (AudioQueueSetParameter(myInfo.mQueue, kAudioQueueParam_Volume, volume), "set queue volume"); XThrowIfError (AudioQueueAddPropertyListener (myInfo.mQueue, kAudioQueueProperty_IsRunning, MyAudioQueuePropertyListenerProc, NULL), "add listener"); #if !TARGET_OS_IPHONE if (rate > 0) { UInt32 propValue = 1; XThrowIfError (AudioQueueSetProperty (myInfo.mQueue, kAudioQueueProperty_EnableTimePitch, &propValue, sizeof(propValue)), "enable time pitch"); propValue = rQuality ? kAudioQueueTimePitchAlgorithm_Spectral : kAudioQueueTimePitchAlgorithm_TimeDomain; XThrowIfError (AudioQueueSetProperty (myInfo.mQueue, kAudioQueueProperty_TimePitchAlgorithm, &propValue, sizeof(propValue)), "time pitch algorithm"); propValue = (rate == 1.0f ? 1 : 0); // bypass rate if 1.0 XThrowIfError (AudioQueueSetProperty (myInfo.mQueue, kAudioQueueProperty_TimePitchBypass, &propValue, sizeof(propValue)), "bypass time pitch"); if (rate != 1) { XThrowIfError (AudioQueueSetParameter (myInfo.mQueue, kAudioQueueParam_PlayRate, rate), "set playback rate"); } if (doPrint) { printf ("Enable rate-scaled playback (rate = %.2f) using %s algorithm\n", rate, (rQuality ? 
"Spectral": "Time Domain")); } } #endif // lets start playing now - stop is called in the AQTestBufferCallback when there's // no more to read from the file XThrowIfError(AudioQueueStart(myInfo.mQueue, NULL), "AudioQueueStart failed"); do { CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.25, false); currentTime += .25; if (duration > 0 && currentTime >= duration) break; } while (gIsRunning); CFRunLoopRunInMode(kCFRunLoopDefaultMode, 1, false); XThrowIfError(AudioQueueDispose(myInfo.mQueue, true), "AudioQueueDispose(true) failed"); XThrowIfError(AudioFileClose(myInfo.mAudioFile), "AudioQueueDispose(false) failed"); } catch (CAXException e) { char buf[256]; fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf)); } catch (...) { fprintf(stderr, "Unspecified exception\n"); } return 0; }
void WriteOutputFile (const char* outputFilePath, OSType dataFormat, Float64 srate, MusicTimeStamp sequenceLength, bool shouldPrint, AUGraph inGraph, UInt32 numFrames, MusicPlayer player) { OSStatus result = 0; UInt32 size; CAStreamBasicDescription outputFormat; outputFormat.mChannelsPerFrame = 2; outputFormat.mSampleRate = srate; outputFormat.mFormatID = dataFormat; AudioFileTypeID destFileType; CAAudioFileFormats::Instance()->InferFileFormatFromFilename (outputFilePath, destFileType); if (dataFormat == kAudioFormatLinearPCM) { outputFormat.mBytesPerPacket = outputFormat.mChannelsPerFrame * 2; outputFormat.mFramesPerPacket = 1; outputFormat.mBytesPerFrame = outputFormat.mBytesPerPacket; outputFormat.mBitsPerChannel = 16; if (destFileType == kAudioFileWAVEType) outputFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked; else outputFormat.mFormatFlags = kLinearPCMFormatFlagIsBigEndian | kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked; } else { // use AudioFormat API to fill out the rest. 
size = sizeof(outputFormat); FailIf ((result = AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &size, &outputFormat)), fail, ""); } if (shouldPrint) { printf ("Writing to file: %s with format:\n* ", outputFilePath); outputFormat.Print(); } CFURLRef url; url = CFURLCreateFromFileSystemRepresentation(NULL, (const UInt8*)outputFilePath, strlen(outputFilePath), false); // create output file, delete existing file ExtAudioFileRef outfile; result = ExtAudioFileCreateWithURL(url, destFileType, &outputFormat, NULL, kAudioFileFlags_EraseFile, &outfile); if (url) CFRelease (url); FailIf (result, fail, "ExtAudioFileCreateWithURL"); AudioUnit outputUnit; outputUnit = NULL; UInt32 nodeCount; FailIf ((result = AUGraphGetNodeCount (inGraph, &nodeCount)), fail, "AUGraphGetNodeCount"); for (UInt32 i = 0; i < nodeCount; ++i) { AUNode node; FailIf ((result = AUGraphGetIndNode(inGraph, i, &node)), fail, "AUGraphGetIndNode"); AudioComponentDescription desc; FailIf ((result = AUGraphNodeInfo(inGraph, node, &desc, NULL)), fail, "AUGraphNodeInfo"); if (desc.componentType == kAudioUnitType_Output) { FailIf ((result = AUGraphNodeInfo(inGraph, node, 0, &outputUnit)), fail, "AUGraphNodeInfo"); break; } } FailIf ((result = (outputUnit == NULL)), fail, "outputUnit == NULL"); { CAStreamBasicDescription clientFormat = CAStreamBasicDescription(); size = sizeof(clientFormat); FailIf ((result = AudioUnitGetProperty (outputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &clientFormat, &size)), fail, "AudioUnitGetProperty: kAudioUnitProperty_StreamFormat"); size = sizeof(clientFormat); FailIf ((result = ExtAudioFileSetProperty(outfile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat)), fail, "ExtAudioFileSetProperty: kExtAudioFileProperty_ClientDataFormat"); { MusicTimeStamp currentTime; AUOutputBL outputBuffer (clientFormat, numFrames); AudioTimeStamp tStamp; memset (&tStamp, 0, sizeof(AudioTimeStamp)); tStamp.mFlags = kAudioTimeStampSampleTimeValid; int 
i = 0; int numTimesFor10Secs = (int)(10. / (numFrames / srate)); do { outputBuffer.Prepare(); AudioUnitRenderActionFlags actionFlags = 0; FailIf ((result = AudioUnitRender (outputUnit, &actionFlags, &tStamp, 0, numFrames, outputBuffer.ABL())), fail, "AudioUnitRender"); tStamp.mSampleTime += numFrames; FailIf ((result = ExtAudioFileWrite(outfile, numFrames, outputBuffer.ABL())), fail, "ExtAudioFileWrite"); FailIf ((result = MusicPlayerGetTime (player, ¤tTime)), fail, "MusicPlayerGetTime"); if (shouldPrint && (++i % numTimesFor10Secs == 0)) printf ("current time: %6.2f beats\n", currentTime); } while (currentTime < sequenceLength); } } // close ExtAudioFileDispose(outfile); return; fail: printf ("Problem: %ld\n", (long)result); exit(1); }
// soundsource overrides int SoundSourceCoreAudio::open() { //m_file.open(QIODevice::ReadOnly); //Open the audio file. OSStatus err; /** This code blocks works with OS X 10.5+ only. DO NOT DELETE IT for now. */ CFStringRef urlStr = CFStringCreateWithCharacters( 0, reinterpret_cast<const UniChar *>(m_qFilename.unicode()), m_qFilename.size()); CFURLRef urlRef = CFURLCreateWithFileSystemPath(NULL, urlStr, kCFURLPOSIXPathStyle, false); err = ExtAudioFileOpenURL(urlRef, &m_audioFile); CFRelease(urlStr); CFRelease(urlRef); /** TODO: Use FSRef for compatibility with 10.4 Tiger. Note that ExtAudioFileOpen() is deprecated above Tiger, so we must maintain both code paths if someone finishes this part of the code. FSRef fsRef; CFURLGetFSRef(reinterpret_cast<CFURLRef>(url.get()), &fsRef); err = ExtAudioFileOpen(&fsRef, &m_audioFile); */ if (err != noErr) { qDebug() << "SSCA: Error opening file " << m_qFilename; return ERR; } // get the input file format CAStreamBasicDescription inputFormat; UInt32 size = sizeof(inputFormat); m_inputFormat = inputFormat; err = ExtAudioFileGetProperty(m_audioFile, kExtAudioFileProperty_FileDataFormat, &size, &inputFormat); if (err != noErr) { qDebug() << "SSCA: Error getting file format (" << m_qFilename << ")"; return ERR; } //Debugging: //printf ("Source File format: "); inputFormat.Print(); //printf ("Dest File format: "); outputFormat.Print(); // create the output format m_outputFormat = CAStreamBasicDescription( inputFormat.mSampleRate, 2, CAStreamBasicDescription::kPCMFormatInt16, true); // set the client format err = ExtAudioFileSetProperty(m_audioFile, kExtAudioFileProperty_ClientDataFormat, sizeof(m_outputFormat), &m_outputFormat); if (err != noErr) { qDebug() << "SSCA: Error setting file property"; return ERR; } // Set m_iChannels and m_samples. 
m_iChannels = m_outputFormat.NumberChannels(); //get the total length in frames of the audio file - copypasta: http://discussions.apple.com/thread.jspa?threadID=2364583&tstart=47 UInt32 dataSize; SInt64 totalFrameCount; dataSize = sizeof(totalFrameCount); //XXX: This looks sketchy to me - Albert err = ExtAudioFileGetProperty(m_audioFile, kExtAudioFileProperty_FileLengthFrames, &dataSize, &totalFrameCount); if (err != noErr) { qDebug() << "SSCA: Error getting number of frames"; return ERR; } // // WORKAROUND for bug in ExtFileAudio // AudioConverterRef acRef; UInt32 acrsize=sizeof(AudioConverterRef); err = ExtAudioFileGetProperty(m_audioFile, kExtAudioFileProperty_AudioConverter, &acrsize, &acRef); //_ThrowExceptionIfErr(@"kExtAudioFileProperty_AudioConverter", err); AudioConverterPrimeInfo primeInfo; UInt32 piSize=sizeof(AudioConverterPrimeInfo); memset(&primeInfo, 0, piSize); err = AudioConverterGetProperty(acRef, kAudioConverterPrimeInfo, &piSize, &primeInfo); if (err != kAudioConverterErr_PropertyNotSupported) { // Only if decompressing //_ThrowExceptionIfErr(@"kAudioConverterPrimeInfo", err); m_headerFrames=primeInfo.leadingFrames; } m_samples = (totalFrameCount/*-m_headerFrames*/)*m_iChannels; m_iDuration = m_samples / (inputFormat.mSampleRate * m_iChannels); m_iSampleRate = inputFormat.mSampleRate; qDebug() << m_samples << totalFrameCount << m_iChannels; //Seek to position 0, which forces us to skip over all the header frames. //This makes sure we're ready to just let the Analyser rip and it'll //get the number of samples it expects (ie. no header frames). seek(0); return OK; }