static double DebugHostTime(const AudioTimeStamp &ts)
{
    static UInt64 baseHostTime = 0;
    if (!(ts.mFlags & kAudioTimeStampHostTimeValid))
        return -1.;
    if (baseHostTime == 0)
        baseHostTime = ts.mHostTime;
    return double(SInt64(ts.mHostTime) - SInt64(baseHostTime)) * CAHostTimeBase::GetInverseFrequency();
}
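// Not part of the original file: a hedged sketch of how DebugHostTime might be
// called from a render callback to log time relative to the first valid host
// timestamp. LoggingRenderProc is a hypothetical name.
static OSStatus LoggingRenderProc(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags,
                                  const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber,
                                  UInt32 inNumberFrames, AudioBufferList *ioData)
{
    // prints -1.0 until a timestamp with a valid host time arrives
    printf("render at %.6f s (host clock)\n", DebugHostTime(*inTimeStamp));
    return noErr;
}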
OSStatus CAPlayThrough::InputProc(void *inRefCon,
                                  AudioUnitRenderActionFlags *ioActionFlags,
                                  const AudioTimeStamp *inTimeStamp,
                                  UInt32 inBusNumber,
                                  UInt32 inNumberFrames,
                                  AudioBufferList *ioData)
{
    OSStatus err = noErr;
    CAPlayThrough *This = (CAPlayThrough *)inRefCon;

    if (This->mFirstInputTime < 0.)
        This->mFirstInputTime = inTimeStamp->mSampleTime;

    // Get the new audio data
    err = AudioUnitRender(This->mInputUnit,
                          ioActionFlags,
                          inTimeStamp,
                          inBusNumber,
                          inNumberFrames,        // # of frames requested
                          This->mInputBuffer);   // Audio Buffer List to hold data
    checkErr(err);

    if (!err)
        err = This->mBuffer->Store(This->mInputBuffer, Float64(inNumberFrames), SInt64(inTimeStamp->mSampleTime));

    return err;
}
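// Not part of the original file: a hedged sketch of how a callback like InputProc
// is registered on an AUHAL input unit. The helper name is hypothetical; the
// property and struct are standard Core Audio API.
static OSStatus SetInputCallbackSketch(AudioUnit inputUnit, AURenderCallback proc, void *refCon)
{
    AURenderCallbackStruct input;
    input.inputProc = proc;             // e.g. CAPlayThrough::InputProc
    input.inputProcRefCon = refCon;     // handed back as inRefCon above

    // AUHAL delivers captured audio through this callback rather than via a render bus
    return AudioUnitSetProperty(inputUnit,
                                kAudioOutputUnitProperty_SetInputCallback,
                                kAudioUnitScope_Global,
                                0,
                                &input,
                                sizeof(input));
}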
SInt64 BufferedFile::LSeek(SInt64 offset, int origin)
{
    if (BufferMode == ReadBuffer)
    {
        if (origin == Seek_Cur)
        {
            // Seek can fall either before or after Pos in the buffer,
            // but it must be within bounds.
            if ((unsigned(offset) + Pos) <= DataSize)
            {
                Pos += (unsigned)offset;
                return SInt64(FilePos - DataSize + Pos);
            }

            // Lightweight buffer "Flush". We do this to avoid an extra seek
            // back operation which would take place if we called FlushBuffer directly.
            origin = Seek_Set;
            offset = (SInt64)(FilePos - DataSize + Pos) + offset;
            Pos = DataSize = 0;
        }
        else if (origin == Seek_Set)
        {
            if (((UInt64)offset - (FilePos - DataSize)) <= DataSize)
            {
                Pos = (unsigned)((UInt64)offset - (FilePos - DataSize));
                return offset;
            }
            Pos = DataSize = 0;
        }
        else
        {
            FlushBuffer();
        }
    }
    else
    {
        FlushBuffer();
    }

    /*
    OVR_ASSERT(BufferMode != NoBuffer);

    if (origin == Seek_Cur && offset + Pos < DataSize)
    {
        Pos += int (offset);
        return FilePos - DataSize + Pos;
    }
    else if (origin == Seek_Set && offset >= SInt64(FilePos - DataSize) && offset < SInt64(FilePos))
    {
        Pos = unsigned(offset - FilePos + DataSize);
        return FilePos - DataSize + Pos;
    }

    FlushBuffer();
    */

    FilePos = pFile->LSeek(offset, origin);
    return FilePos;
}
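// Not part of the original source: a worked example (all numbers invented) of the
// Seek_Cur fast path above, using plain integers instead of the class members.
#include <cstdint>
#include <cstdio>

static void BufferedSeekFastPathExample()
{
    uint64_t filePos  = 8192;   // underlying file position == end of the buffered window
    unsigned dataSize = 4096;   // bytes currently buffered
    unsigned pos      = 100;    // read cursor within the buffer
    int64_t  offset   = 500;    // relative seek request

    if (unsigned(offset) + pos <= dataSize) {                    // 600 <= 4096: stays inside the buffer
        pos += unsigned(offset);
        int64_t logical = int64_t(filePos - dataSize + pos);     // 8192 - 4096 + 600 = 4696
        printf("logical position after seek: %lld\n", (long long)logical);
    }
}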
SInt64 JSON::as_integer() const
{
    switch (m_type)
    {
        case e_null:    return 0;
        case e_bool:    return (m_bool ? 1 : 0);
        case e_integer: return m_integer;
        case e_real:    return SInt64(m_real);
        case e_string:  return stringToNumber(m_string).as_integer();
        case e_array:   return arrayToNumber(m_array).as_integer();
        case e_object:  return objectToNumber(m_object).as_integer();
    }
    return 0;
}
void CAAudioFileReader::SetCurrentPosition(double loc)
{
    bool wasRunning = IsRunning();
    if (wasRunning)
        Stop();

    SInt64 frameNumber = SInt64(loc * GetFile().GetNumberFrames() + 0.5);
    try {
        GetFile().Seek(frameNumber);
    } catch (...) {
    }

    if (wasRunning)
        Start();
}
void ZKMORFileReader::SynchronousSetCurrentPosition(double loc)
{
    SInt64 frameNumber = SInt64(loc * GetFile().GetNumberFrames() + 0.5);
    // SInt64 frameNumber = SInt64(loc * GetFile().GetNumberFrames());
    try {
        GetFile().Seek(frameNumber);
        Prime();
        char debugStr[255];
        SNPrint(debugStr, 255);
    } catch (CAXException &e) {
        char errStr[255];
        e.FormatError(errStr);
        ZKMORLogError(kZKMORLogSource_Zone, CFSTR("Could not set position on file 0x%x to %lli : %s"), this, frameNumber, errStr);
    } catch (...) {
        ZKMORLogError(kZKMORLogSource_Zone, CFSTR("Could not set position on file 0x%x to %lli"), this, frameNumber);
    }
}
OSStatus CAPlayThrough::OutputProc(void *inRefCon,
                                   AudioUnitRenderActionFlags *ioActionFlags,
                                   const AudioTimeStamp *TimeStamp,
                                   UInt32 inBusNumber,
                                   UInt32 inNumberFrames,
                                   AudioBufferList *ioData)
{
    OSStatus err = noErr;
    CAPlayThrough *This = (CAPlayThrough *)inRefCon;
    Float64 rate = 0.0;
    AudioTimeStamp inTS, outTS;

    if (This->mFirstInputTime < 0.) {
        // input hasn't run yet -> silence
        MakeBufferSilent(ioData);
        return noErr;
    }

    // use the varispeed playback rate to offset small discrepancies in sample rate
    // first find the rate scalars of the input and output devices
    err = AudioDeviceGetCurrentTime(This->mInputDevice.mID, &inTS);
    // this callback may still be called a few times after the device has been stopped
    if (err) {
        MakeBufferSilent(ioData);
        return noErr;
    }

    err = AudioDeviceGetCurrentTime(This->mOutputDevice.mID, &outTS);
    checkErr(err);

    rate = inTS.mRateScalar / outTS.mRateScalar;
    err = AudioUnitSetParameter(This->mVarispeedUnit, kVarispeedParam_PlaybackRate, kAudioUnitScope_Global, 0, rate, 0);
    checkErr(err);

    // get the delta between the devices and add it to the offset
    if (This->mFirstOutputTime < 0.) {
        This->mFirstOutputTime = TimeStamp->mSampleTime;
        Float64 delta = (This->mFirstInputTime - This->mFirstOutputTime);
        This->ComputeThruOffset();
        // changed: 3865519  11/10/04
        if (delta < 0.0)
            This->mInToOutSampleOffset -= delta;
        else
            This->mInToOutSampleOffset = -delta + This->mInToOutSampleOffset;

        MakeBufferSilent(ioData);
        return noErr;
    }

    // copy the data from the buffers
    err = This->mBuffer->Fetch(ioData, inNumberFrames, SInt64(TimeStamp->mSampleTime - This->mInToOutSampleOffset));
    // old line of code, different once the ring buffer is replaced:
    // err = This->mBuffer->Fetch(ioData, inNumberFrames, SInt64(TimeStamp->mSampleTime - This->mInToOutSampleOffset), false);
    if (err != kCARingBufferError_OK) {
        MakeBufferSilent(ioData);
        SInt64 bufferStartTime, bufferEndTime;
        This->mBuffer->GetTimeBounds(bufferStartTime, bufferEndTime);
        This->mInToOutSampleOffset = TimeStamp->mSampleTime - bufferStartTime;
    }

    return noErr;
}
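// MakeBufferSilent is not defined in this excerpt. A minimal sketch consistent
// with how it is used above (emit silence when there is nothing sensible to play)
// simply zeroes every buffer in the list:
static void MakeBufferSilentSketch(AudioBufferList *ioData)
{
    for (UInt32 i = 0; i < ioData->mNumberBuffers; i++)
        memset(ioData->mBuffers[i].mData, 0, ioData->mBuffers[i].mDataByteSize);
}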
void CAAudioFileConverter::ConvertFile(const ConversionParameters &_params)
{
    FSRef destFSRef;
    UInt32 propertySize;
    CAStreamBasicDescription destFormat;
    CAAudioChannelLayout origSrcFileLayout, srcFileLayout, destFileLayout;
    bool openedSourceFile = false, createdOutputFile = false;

    mParams = _params;
    mReadBuffer = NULL;
    mReadPtrs = NULL;
    CABufferList *writeBuffer = NULL;
    CABufferList *writePtrs = NULL;

    PrepareConversion();

    try {
        if (TaggedDecodingFromCAF())
            ReadCAFInfo();
        OpenInputFile();
        openedSourceFile = true;

        // get input file's format
        const CAStreamBasicDescription &srcFormat = mSrcFile.GetFileDataFormat();
        if (mParams.flags & kOpt_Verbose) {
            printf("Input file: %s, %qd frames\n",
                mParams.input.filePath ? basename(mParams.input.filePath) : "?",
                mSrcFile.GetNumberFrames());
        }
        mSrcFormat = srcFormat;

        // prepare output file's format (must be assigned before testing IsPCM below)
        destFormat = mParams.output.dataFormat;

        bool encoding = !destFormat.IsPCM();
        bool decoding = !srcFormat.IsPCM();

        if (!encoding && destFormat.mSampleRate == 0.)
            // on encode, it's OK to have a 0 sample rate; ExtAudioFile will get the SR from the converter and set it on the file.
            // on decode or PCM->PCM, a sample rate of 0 is interpreted as using the source sample rate
            destFormat.mSampleRate = srcFormat.mSampleRate;

        // source channel layout
        srcFileLayout = mSrcFile.GetFileChannelLayout();
        origSrcFileLayout = srcFileLayout;
        if (mParams.input.channelLayoutTag != 0) {
            XThrowIf(AudioChannelLayoutTag_GetNumberOfChannels(mParams.input.channelLayoutTag) != srcFormat.mChannelsPerFrame,
                -1, "input channel layout has wrong number of channels for file");
            srcFileLayout = CAAudioChannelLayout(mParams.input.channelLayoutTag);
            mSrcFile.SetFileChannelLayout(srcFileLayout);
        }

        // destination channel layout
        int outChannels = mParams.output.channels;
        if (mParams.output.channelLayoutTag != 0) {
            // use the one specified by caller, if any
            destFileLayout = CAAudioChannelLayout(mParams.output.channelLayoutTag);
        } else if (srcFileLayout.IsValid()) {
            // otherwise, assume the same as the source, if any
            destFileLayout = srcFileLayout;
        }
        if (destFileLayout.IsValid()) {
            // the output channel layout specifies the number of output channels
            if (outChannels != -1)
                XThrowIf((unsigned)outChannels != destFileLayout.NumberChannels(),
                    -1, "output channel layout has wrong number of channels");
            else
                outChannels = destFileLayout.NumberChannels();
        }

        if (!(mParams.flags & kOpt_NoSanitizeOutputFormat)) {
            // adjust the output format's channels; output.channels overrides the channels
            if (outChannels == -1)
                outChannels = srcFormat.mChannelsPerFrame;
            if (outChannels > 0) {
                destFormat.mChannelsPerFrame = outChannels;
                destFormat.mBytesPerPacket *= outChannels;
                destFormat.mBytesPerFrame *= outChannels;
            }

            // use AudioFormat API to clean up the output format
            propertySize = sizeof(AudioStreamBasicDescription);
            XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &propertySize, &destFormat),
                "get destination format info");
        }

        OpenOutputFile(srcFormat, destFormat, destFSRef, destFileLayout);
        createdOutputFile = true;
        mDestFormat = destFormat;

        // set up client formats
        CAStreamBasicDescription srcClientFormat, destClientFormat;
        {
            CAAudioChannelLayout srcClientLayout, destClientLayout;

            if (encoding) {
                if (decoding) {
                    // transcoding
                    // XThrowIf(encoding && decoding, -1, "transcoding not currently supported");
                    if (srcFormat.mChannelsPerFrame > 2 || destFormat.mChannelsPerFrame > 2)
                        CAXException::Warning("Transcoding multichannel audio may not handle channel layouts correctly", 0);

                    srcClientFormat.SetCanonical(std::min(srcFormat.mChannelsPerFrame, destFormat.mChannelsPerFrame), true);
                    srcClientFormat.mSampleRate = std::max(srcFormat.mSampleRate, destFormat.mSampleRate);
                    mSrcFile.SetClientFormat(srcClientFormat, NULL);

                    destClientFormat = srcClientFormat;
                } else {
                    // encoding
                    srcClientFormat = srcFormat;
                    destClientFormat = srcFormat;
                }
                // by here, destClientFormat will have a valid sample rate
                destClientLayout = srcFileLayout.IsValid() ? srcFileLayout : destFileLayout;

                mDestFile.SetClientFormat(destClientFormat, &destClientLayout);
            } else {
                // decoding or PCM->PCM
                if (destFormat.mSampleRate == 0.)
                    destFormat.mSampleRate = srcFormat.mSampleRate;

                destClientFormat = destFormat;
                srcClientFormat = destFormat;
                srcClientLayout = destFileLayout;

                mSrcFile.SetClientFormat(srcClientFormat, &srcClientLayout);
            }
        }

        XThrowIf(srcClientFormat.mBytesPerPacket == 0, -1, "source client format not PCM");
        XThrowIf(destClientFormat.mBytesPerPacket == 0, -1, "dest client format not PCM");

        if (encoding) {
            // set the bitrate
            if (mParams.output.bitRate != -1) {
                if (mParams.flags & kOpt_Verbose)
                    printf("bitrate = %ld\n", mParams.output.bitRate);
                mDestFile.SetConverterProperty(kAudioConverterEncodeBitRate, sizeof(UInt32), &mParams.output.bitRate);
            }

            // set the codec quality
            if (mParams.output.codecQuality != -1) {
                if (mParams.flags & kOpt_Verbose)
                    printf("codec quality = %ld\n", mParams.output.codecQuality);
                mDestFile.SetConverterProperty(kAudioConverterCodecQuality, sizeof(UInt32), &mParams.output.codecQuality);
            }

            // set the bitrate strategy -- called bitrate format in the codecs since it had already shipped
            if (mParams.output.strategy != -1) {
                if (mParams.flags & kOpt_Verbose)
                    printf("strategy = %ld\n", mParams.output.strategy);
                mDestFile.SetConverterProperty(kAudioCodecBitRateFormat, sizeof(UInt32), &mParams.output.strategy);
            }
        }

        // set the SRC quality
        if (mParams.output.srcQuality != -1) {
            if (srcFormat.mSampleRate != 0. && destFormat.mSampleRate != 0. && srcFormat.mSampleRate != destFormat.mSampleRate) {
                if (mParams.flags & kOpt_Verbose)
                    printf("SRC quality = %ld\n", mParams.output.srcQuality);
                if (encoding)
                    mDestFile.SetConverterProperty(kAudioConverterSampleRateConverterQuality, sizeof(UInt32), &mParams.output.srcQuality);
                else
                    mSrcFile.SetConverterProperty(kAudioConverterSampleRateConverterQuality, sizeof(UInt32), &mParams.output.srcQuality);
            }
        }

        if (decoding) {
            if (mParams.output.primeMethod != -1)
                mSrcFile.SetConverterProperty(kAudioConverterPrimeMethod, sizeof(UInt32), &mParams.output.primeMethod);
        }

        PrintFormats(&origSrcFileLayout);

        // prepare I/O buffers
        UInt32 bytesToRead = 0x10000;
        UInt32 framesToRead = bytesToRead;  // OK, ReadPackets will limit as appropriate
        ComputeReadSize(srcFormat, destFormat, bytesToRead, framesToRead);

        // const SInt64 totalFrames = mSrcFile.GetNumberFrames();
        //#warning "GetNumberFrames() can be prohibitively slow for some formats"

        mReadBuffer = CABufferList::New("readbuf", srcClientFormat);
        mReadBuffer->AllocateBuffers(bytesToRead);
        mReadPtrs = CABufferList::New("readptrs", srcClientFormat);

        BeginConversion();

        while (true) {
            //XThrowIf(Progress(mSrcFile.Tell(), totalFrames), userCanceledErr, "user stopped");
            // this was commented out for awhile -- performance? make it optional?
            UInt32 nFrames = framesToRead;
            mReadPtrs->SetFrom(mReadBuffer);
            AudioBufferList *readbuf = &mReadPtrs->GetModifiableBufferList();

            mSrcFile.Read(nFrames, readbuf);
            //printf("read %ld of %ld frames\n", nFrames, framesToRead);
            if (nFrames == 0)
                break;

            mDestFile.Write(nFrames, readbuf);
            if (ShouldTerminateConversion())
                break;
        }

        if (decoding) {
            // fix up the destination file's length if necessary and possible
            SInt64 nframes = mSrcFile.GetNumberFrames();
            if (nframes != 0) {
                // only shorten, don't try to lengthen
                nframes = SInt64(ceil(nframes * destFormat.mSampleRate / srcFormat.mSampleRate));
                if (nframes < mDestFile.GetNumberFrames()) {
                    mDestFile.SetNumberFrames(nframes);
                }
            }
        }
        EndConversion();
    }
    catch (...) {
        delete mReadBuffer;
        delete mReadPtrs;
        delete writeBuffer;
        delete writePtrs;
        if (!createdOutputFile)
            PrintFormats(&origSrcFileLayout);
        try { mSrcFile.Close(); } catch (...) { }
        try { mDestFile.Close(); } catch (...) { }
        if (createdOutputFile)
            unlink(mOutName);
        throw;
    }

    delete mReadBuffer;
    delete mReadPtrs;
    delete writeBuffer;
    delete writePtrs;
    mSrcFile.Close();
    mDestFile.Close();
    if (TaggedEncodingToCAF())
        WriteCAFInfo();

    if (mParams.flags & kOpt_Verbose) {
        // must close to flush encoder; GetNumberFrames() not necessarily valid until afterwards but then
        // the file is closed
        CAAudioFile temp;
        FSRef destFSRef;
        if (FSPathMakeRef((UInt8 *)mOutName, &destFSRef, NULL) == noErr) {
            temp.Open(destFSRef);
            printf("Output file: %s, %qd frames\n", basename(mOutName), temp.GetNumberFrames());
        }
    }
}
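// Not part of the original file: a hypothetical caller sketch showing how the
// fields that ConvertFile visibly reads might be filled in for a PCM -> AAC
// encode. Default construction of ConversionParameters, the scoping of
// kOpt_Verbose, and how the output path is supplied are assumptions.
static void ConvertFileUsageSketch()
{
    CAAudioFileConverter::ConversionParameters params;   // assumed to default-initialize

    char inputPath[] = "/tmp/input.wav";
    params.input.filePath = inputPath;

    params.output.dataFormat.mFormatID = kAudioFormatMPEG4AAC;   // encode to AAC
    params.output.dataFormat.mSampleRate = 0.;                   // 0 == pick up the source rate
    params.output.channels = 2;
    params.output.bitRate = 128000;

    params.flags = kOpt_Verbose;                                 // scoping of kOpt_* is assumed

    CAAudioFileConverter converter;
    converter.ConvertFile(params);
}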
SInt64 RTPOverbufferWindow::CheckTransmitTime(const SInt64& inTransmitTime, const SInt64& inCurrentTime, SInt32 inPacketSize)
{
    // if this is the beginning of a bucket interval, roll over figures from last time.
    // accumulate statistics over the period of a second
    if (inCurrentTime - fBucketBegin > fSendInterval)
    {
        fPreviousBucketBegin = fBucketBegin;
        fBucketBegin = inCurrentTime;
        if (fPreviousBucketBegin == 0)
            fPreviousBucketBegin = fBucketBegin - fSendInterval;
        fBytesDuringBucket = 0;
        if (inCurrentTime - fLastSecondStart > 1000)
        {
            fBytesDuringPreviousSecond = fBytesDuringLastSecond;
            fBytesDuringLastSecond = 0;
            fPreviousSecondStart = fLastSecondStart;
            fLastSecondStart = inCurrentTime;
        }

        fPreviousBucketTimeAhead = fBucketTimeAhead;
    }

    if (fOverbufferWindowBegin == -1)
        fOverbufferWindowBegin = inCurrentTime;

    if ((inTransmitTime <= inCurrentTime + fSendInterval) ||
        (fOverbufferingEnabled && (inTransmitTime <= inCurrentTime + fSendInterval + fSendAheadDurationInMsec)))
    {
        // If this happens, this packet needs to be sent regardless of overbuffering
        return -1;
    }

    if (!fOverbufferingEnabled || (fWindowSize == 0))
        return inTransmitTime;

    // if the client is running low on memory, wait a while for it to be freed up
    // there's nothing magic about these numbers, we're just trying to be conservative
    if ((fWindowSize != -1) && (inPacketSize * 5 > fWindowSize - fBytesSentSinceLastReport))
    {
        return inCurrentTime + (fSendInterval * 5);     // client reports don't come that often
    }

    // if we're far enough ahead, then wait until it's time to send more packets
    if (inTransmitTime - inCurrentTime > fMaxSendAheadTime)
        return inTransmitTime - fMaxSendAheadTime + fSendInterval;

    // during the first second just send packets normally
    // if (fPreviousSecondStart == -1)
    //     return inCurrentTime + fSendInterval;

    // now figure if we want to send this packet during this bucket. We have two limitations.
    // First we scale up bitrate slowly, so we should only try and send a little more than we
    // sent recently (averaged over a second or two). However, we always try and send at
    // least the current bitrate and never more than double.
    // SInt32 currentBitRate = fBytesDuringBucket * 1000 / (inCurrentTime - fPreviousBucketBegin);
    // SInt32 averageBitRate = (fBytesDuringPreviousSecond + fBytesDuringLastSecond) * 1000 / (inCurrentTime - fPreviousSecondStart);
    // SInt32 averageBitRate = fBytesDuringPreviousSecond * 1000 / (fLastSecondStart - fPreviousSecondStart);
    fBucketTimeAhead = inTransmitTime - inCurrentTime;
    // printf("Current br = %d, average br = %d (cta = %qd, pta = %qd)\n", currentBitRate, averageBitRate, currentTimeAhead, fPreviousBucketTimeAhead);

    // always try and stay as far ahead as we were before
    if (fBucketTimeAhead < fPreviousBucketTimeAhead)
        return -1;

    // but don't send at more than double the bitrate (for any given time we should only get further
    // ahead by that amount of time)
    //printf("cta - pta = %qd, ct - pbb = %qd\n", fBucketTimeAhead - fPreviousBucketTimeAhead, SInt64((inCurrentTime - fPreviousBucketBegin) * (fOverbufferRate - 1.0)));
    if (fBucketTimeAhead - fPreviousBucketTimeAhead > ((inCurrentTime - fPreviousBucketBegin) * (fOverbufferRate - 1.0)))
    {
        fBucketTimeAhead = fPreviousBucketTimeAhead + SInt64((inCurrentTime - fPreviousBucketBegin) * (fOverbufferRate - 1.0));
        return inCurrentTime + fSendInterval;       // this will get us to the next bucket
    }

    // don't send more than 10% over the average bitrate for the previous second
    // if (currentBitRate > averageBitRate * 11 / 10)
    //     return inCurrentTime + fSendInterval;    // this will get us to the next bucket

    return -1;  // send this packet
}
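// Not part of the original source: invented numbers illustrating the rate limit
// above with fOverbufferRate = 2.0 (the sender may at most double its lead over
// the course of a bucket).
#include <cstdint>
#include <cstdio>

static void OverbufferRateLimitExample()
{
    int64_t currentTime         = 10050;   // ms
    int64_t previousBucketBegin = 10000;   // the previous bucket began 50 ms ago
    int64_t previousTimeAhead   = 400;     // lead recorded at the previous bucket
    int64_t bucketTimeAhead     = 480;     // lead this packet would create
    double  overbufferRate      = 2.0;

    int64_t maxGrowth = int64_t((currentTime - previousBucketBegin) * (overbufferRate - 1.0));   // 50 ms
    if (bucketTimeAhead - previousTimeAhead > maxGrowth) {
        // 480 - 400 = 80 > 50: clamp the lead and defer the packet to the next bucket
        bucketTimeAhead = previousTimeAhead + maxGrowth;   // 450 ms
        printf("defer packet; lead clamped to %lld ms\n", (long long)bucketTimeAhead);
    }
}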
static OSStatus InputCallback (void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags,
                               const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber,
                               UInt32 inNumberFrames, AudioBufferList *ioData)
{
#if CA_AU_PROFILE_TIME
    UInt64 now = CAHostTimeBase::GetTheCurrentTime();
#endif

    CAAudioFile &readFile = *(static_cast<CAAudioFile*>(inRefCon));

#if !CAAF_USE_EXTAUDIOFILE
    if (SInt64(inTimeStamp->mSampleTime) > readFile.GetNumberPackets()) {
#else
    if (SInt64(inTimeStamp->mSampleTime) > readFile.GetNumberFrames()) {
#endif
#if DEBUG
        printf ("reading past end of input\n");
#endif
        return -1;
    }

    readFile.Seek (SInt64(inTimeStamp->mSampleTime));
    readFile.Read (inNumberFrames, ioData);

#if CA_AU_PROFILE_TIME
    sReadTime += (CAHostTimeBase::GetTheCurrentTime() - now);
#endif

    return noErr;
}

static OSStatus FConvInputCallback (void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags,
                                    const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber,
                                    UInt32 inNumberFrames, AudioBufferList *ioData)
{
#if CA_AU_PROFILE_TIME
    UInt64 now = CAHostTimeBase::GetTheCurrentTime();
#endif

    CAAudioFile &readFile = *(static_cast<CAAudioFile*>(inRefCon));

    // this test is ONLY needed in case of processing with a Format Converter type of AU
    // in all other cases, the CAAUProcessor class will NEVER call you for input
    // beyond the end of the file....
#if !CAAF_USE_EXTAUDIOFILE
    if (SInt64(inTimeStamp->mSampleTime) >= readFile.GetNumberPackets()) {
#else
    if (SInt64(inTimeStamp->mSampleTime) >= readFile.GetNumberFrames()) {
#endif
        return -1;
    }

    readFile.Seek (SInt64(inTimeStamp->mSampleTime));

    UInt32 readPackets = inNumberFrames;

    // also, have to do this for a format converter AU - otherwise we'd just read what we're told
#if !CAAF_USE_EXTAUDIOFILE
    if (SInt64(inTimeStamp->mSampleTime + inNumberFrames) > readFile.GetNumberPackets()) {
#else
    if (SInt64(inTimeStamp->mSampleTime + inNumberFrames) > readFile.GetNumberFrames()) {
#endif
        // first set this to zero as we're only going to read a partial number of frames
        AudioBuffer *buf = ioData->mBuffers;
        for (UInt32 i = ioData->mNumberBuffers; i--; ++buf)
            memset((Byte *)buf->mData, 0, buf->mDataByteSize);

#if !CAAF_USE_EXTAUDIOFILE
        readPackets = UInt32 (readFile.GetNumberPackets() - SInt64(inTimeStamp->mSampleTime));
#else
        readPackets = UInt32 (readFile.GetNumberFrames() - SInt64(inTimeStamp->mSampleTime));
#endif
    }

    readFile.Read (readPackets, ioData);

#if CA_AU_PROFILE_TIME
    sReadTime += (CAHostTimeBase::GetTheCurrentTime() - now);
#endif

    return noErr;
}

struct ReadBuffer {
    AUOutputBL *readData;
    UInt32 readFrames;
};

static OSStatus MemoryInputCallback (void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags,
                                     const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber,
                                     UInt32 inNumberFrames, AudioBufferList *ioData)
{
#if CA_AU_PROFILE_TIME
    UInt64 now = CAHostTimeBase::GetTheCurrentTime();
#endif

    ReadBuffer *readBuffer = (ReadBuffer*)inRefCon;

    if (((readBuffer->readFrames + inNumberFrames) * sizeof(Float32)) > (readBuffer->readData->ABL()->mBuffers[0].mDataByteSize)) {
        // going past read size
        AudioBuffer *buf = ioData->mBuffers;
        for (UInt32 i = ioData->mNumberBuffers; i--; ++buf)
            memset((Byte *)buf->mData, 0, buf->mDataByteSize);
    }
    else {
        AudioBuffer *buf = ioData->mBuffers;
        AudioBuffer *rBuf = readBuffer->readData->ABL()->mBuffers;
        for (UInt32 i = ioData->mNumberBuffers; i--; ++buf, ++rBuf) {
            AudioBuffer readB = *rBuf;
            readB.mData = static_cast<Float32*>(rBuf->mData) + readBuffer->readFrames;
            memcpy (buf->mData, readB.mData, buf->mDataByteSize);
        }
        readBuffer->readFrames += inNumberFrames;
    }

#if CA_AU_PROFILE_TIME
    sReadTime += (CAHostTimeBase::GetTheCurrentTime() - now);
#endif

    return noErr;
}

#pragma mark __Utility Helpers

CFPropertyListRef ReadPresetFromPresetFile (char* filePath)
{
    if (!filePath)
        return NULL;

    FSRef ref;
    if (FSPathMakeRef((UInt8 *)filePath, &ref, NULL))
        return NULL;

    CFDataRef resourceData = NULL;
    CFPropertyListRef theData = NULL;
    CFStringRef errString = NULL;

    CFURLRef fileURL = CFURLCreateFromFSRef (kCFAllocatorDefault, &ref);
    if (fileURL == NULL) {
        goto home;
    }

    SInt32 result;

    // Read the XML file.
    Boolean status;
    status = CFURLCreateDataAndPropertiesFromResource (kCFAllocatorDefault, fileURL,
                    &resourceData,  // place to put file data
                    NULL, NULL, &result);
    if (status == false || result) {
        goto home;
    }

    theData = CFPropertyListCreateFromXMLData (kCFAllocatorDefault, resourceData, kCFPropertyListImmutable, &errString);
    if (theData == NULL || errString) {
        if (theData)
            CFRelease (theData);
        theData = NULL;
        goto home;
    }

home:
    if (fileURL)
        CFRelease (fileURL);
    if (resourceData)
        CFRelease (resourceData);
    if (errString)
        CFRelease (errString);

    return theData;
}

#pragma mark __the setup code

#define OFFLINE_AU_CMD  "[-au TYPE SUBTYPE MANU] The Audio Unit component description\n\t"
#define INPUT_FILE      "[-i /Path/To/File] The file that is to be processed.\n\t"
#define OUTPUT_FILE     "[-o /Path/To/File/To/Create] This will be in the same format as the input file\n\t"
#define AU_PRESET_CMD   "[-p /Path/To/AUPreset/File] Specify an AU Preset File to establish the state of the AU\n\t"
#define SHORT_MEM_CMD   "[-m] Just reads and processes the first half second of the input file\n\t"
#define USE_MAX_FRAMES  "[-f max_frames] default is 32768 (512 for aufc units)"

static char* usageStr = "Usage: AU Process\n\t"
                OFFLINE_AU_CMD
                INPUT_FILE
                OUTPUT_FILE
                AU_PRESET_CMD
                SHORT_MEM_CMD
                USE_MAX_FRAMES;

static int StrToOSType(const char *str, OSType &t)
{
    char buf[4];
    const char *p = str;
    int x;
    for (int i = 0; i < 4; ++i) {
        if (*p != '\\') {
            if ((buf[i] = *p++) == '\0')
                goto fail;
        } else {
            if (*++p != 'x') goto fail;
            if (sscanf(++p, "%02X", &x) != 1) goto fail;
            buf[i] = x;
            p += 2;
        }
    }
    t = EndianU32_BtoN(*(UInt32 *)buf);
    return p - str;
fail:
    return 0;
}
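// Not part of the original file: a hedged sketch of how StrToOSType would be used
// when parsing the "-au TYPE SUBTYPE MANU" arguments described in usageStr above
// (the real argument-parsing loop is not shown in this excerpt).
static bool ParseAUTripleSketch(const char *typeStr, const char *subTypeStr, const char *manuStr,
                                OSType &outType, OSType &outSubType, OSType &outManu)
{
    // StrToOSType returns 0 on failure, otherwise the number of characters consumed
    if (!StrToOSType(typeStr, outType))         return false;
    if (!StrToOSType(subTypeStr, outSubType))   return false;
    if (!StrToOSType(manuStr, outManu))         return false;
    return true;
}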
OSStatus CAPlayThrough::OutputProc(void *inRefCon,
                                   AudioUnitRenderActionFlags *ioActionFlags,
                                   const AudioTimeStamp *TimeStamp,
                                   UInt32 inBusNumber,
                                   UInt32 inNumberFrames,
                                   AudioBufferList *ioData)
{
    OSStatus err = noErr;
    CAPlayThrough *This = (CAPlayThrough *)inRefCon;
    Float64 rate = 0.0;
    AudioTimeStamp inTS, outTS;

    if (This->mFirstInputTime < 0.) {
        // input hasn't run yet -> silence
        MakeBufferSilent(ioData);
        return noErr;
    }

    // use the varispeed playback rate to offset small discrepancies in sample rate
    // first find the rate scalars of the input and output devices
    err = AudioDeviceGetCurrentTime(This->mInputDevice.mID, &inTS);
    // this callback may still be called a few times after the device has been stopped
    if (err) {
        MakeBufferSilent(ioData);
        return noErr;
    }

    err = AudioDeviceGetCurrentTime(This->mOutputDevice.mID, &outTS);
    checkErr(err);

    rate = inTS.mRateScalar / outTS.mRateScalar;
    err = AudioUnitSetParameter(This->mVarispeedUnit, kVarispeedParam_PlaybackRate, kAudioUnitScope_Global, 0, rate, 0);
    checkErr(err);

    // get the delta between the devices and add it to the offset
    if (This->mFirstOutputTime < 0.) {
        This->mFirstOutputTime = TimeStamp->mSampleTime;
        Float64 delta = (This->mFirstInputTime - This->mFirstOutputTime);
        This->ComputeThruOffset();
        // changed: 3865519  11/10/04
        if (delta < 0.0)
            This->mInToOutSampleOffset -= delta;
        else
            This->mInToOutSampleOffset = -delta + This->mInToOutSampleOffset;

        CAPT_DEBUG("Set initial IOOffset to %f.\n", This->mInToOutSampleOffset);
        MakeBufferSilent(ioData);
        return noErr;
    }

    // copy the data from the buffers
    err = This->mBuffer->Fetch(ioData, inNumberFrames, SInt64(TimeStamp->mSampleTime - This->mInToOutSampleOffset));
    if (err != kCARingBufferError_OK) {
        SInt64 bufferStartTime, bufferEndTime;
        This->mBuffer->GetTimeBounds(bufferStartTime, bufferEndTime);

        CAPT_DEBUG("Oops. Adjusting IOOffset from %f, ", This->mInToOutSampleOffset);
        if (err < kCARingBufferError_OK) {
            CAPT_DEBUG("ahead ");
            if (err == kCARingBufferError_WayBehind) {
                MakeBufferSilent(ioData);
            }
            This->mInToOutSampleOffset += std::max((TimeStamp->mSampleTime - This->mInToOutSampleOffset) - bufferStartTime, kAdjustmentOffsetSamples);
        }
        else if (err > kCARingBufferError_OK) {
            CAPT_DEBUG("behind ");
            if (err == kCARingBufferError_WayAhead) {
                MakeBufferSilent(ioData);
            }
            // Adjust by the amount that we read past in the buffer
            This->mInToOutSampleOffset += std::max(((TimeStamp->mSampleTime - This->mInToOutSampleOffset) + inNumberFrames) - bufferEndTime, kAdjustmentOffsetSamples);
        }
        CAPT_DEBUG("to %f.\n", This->mInToOutSampleOffset);

        MakeBufferSilent(ioData);
    }

    return noErr;
}