// Render one slice of audio through the hosted AU.
//
// ioData — buffer list to render into (byte sizes may be shrunk for a partial final slice).
// ioNumFrames — in: frames requested; out: frames actually produced (may be reduced at
//               end-of-input in an offline context, or recalculated for an Offline AU type).
// outIsSilence — set from the unit's kAudioUnitRenderAction_OutputIsSilence render flag.
// outOLCompleted / outOLRequiresPostProcess — REQUIRED (non-null) in an offline context;
//               report that input is exhausted and whether tail/latency samples remain
//               to be flushed via a post-process pass.
//
// Returns noErr on success; kAudioUnitErr_InvalidOfflineRender if rendering offline
// without a completed preflight; otherwise the AU's render error.
OSStatus CAAUProcessor::Render (AudioBufferList 		*ioData,
								UInt32 					&ioNumFrames,
								bool					&outIsSilence,
								bool 					*outOLCompleted,
								bool 					*outOLRequiresPostProcess)
{
	if (IsOfflineContext())
	{
		// offline rendering requires a successful Preflight() first
		if (!mPreflightDone)
			return kAudioUnitErr_InvalidOfflineRender;

		// YES - this is correct!!! you have to provide both if rendering in an offline Context
		*outOLCompleted = false;
		*outOLRequiresPostProcess = false;

		if (!IsOfflineAU() && !mUnit.Comp().Desc().IsFConv())
		{
			// have we processed the input we expect too?
			// in an offline case, we want to create output that matches the input
			// for an OfflineAU type, it manages this internally, so we don't have to do anything
			// for a FormatConverter AU, we don't know and can't tell, so we can't do anything here
			// for any other AU type (effect, instrument) the Prime assumption is that it will
			// ask for the same number of frames of input as it is asked to output
			// so we can ask what it is doing, and get a sample accurate output (which is input + tail time)
			if (mRenderTimeStamp.mSampleTime + ioNumFrames >= InputSampleCount())
			{
				// if we fall into here, we have just a partial number of input samples left
				// (less input less than what we've been asked to produce output for.
				*outOLCompleted = true;
				// we require post processing if we've got some tail (or latency) samples to flush through
				*outOLRequiresPostProcess = mTailSamplesToProcess > 0;
				// clamp the request to the input samples that actually remain
				if (InputSampleCount() > mRenderTimeStamp.mSampleTime) {
					ioNumFrames = (UInt32)(InputSampleCount() - mRenderTimeStamp.mSampleTime);
				} else {
					ioNumFrames = 0;
				}
				mTailSamplesRemaining = mTailSamplesToProcess;
				// shrink the buffer list's byte sizes to match the (possibly partial) frame count
				SetBufferListToNumFrames (*ioData, ioNumFrames);
				// we've got no input samples to process this time.
				if (ioNumFrames == 0) {
					if (*outOLRequiresPostProcess)
						// tail remains: switch to the silent input callback for the post-process phase
						SetInputCallback (mUnit, sSilentCallback);
					else
						mUnit.GlobalReset (); //flush this out, as we're done with this phase
					return noErr;
				}
			}
		}

		// an Offline AU type needs the explicit offline render action flag
		AudioUnitRenderActionFlags renderFlags = IsOfflineAU() ? kAudioOfflineUnitRenderAction_Render : 0;
		OSStatus result = mUnit.Render (&renderFlags, &mRenderTimeStamp, 0, ioNumFrames, ioData);
		if (result) {
			printf("E result %d\n", (int)result);
		}
		if (result) {
			if (mUnit.Comp().Desc().IsFConv()) {
				// this is the only way we can tell we're done with a FormatConverter AU
				// - ie. client returns an error from input
				result = noErr;
				*outOLCompleted = true;
				*outOLRequiresPostProcess = mTailSamplesToProcess > 0;
				ioNumFrames = 0;
				SetBufferListToNumFrames (*ioData, ioNumFrames);
			} else
				return result;
		}

		// (disabled debug dump of the rendered samples was here)

		mRenderTimeStamp.mSampleTime += ioNumFrames;
		outIsSilence = (renderFlags & kAudioUnitRenderAction_OutputIsSilence);

		// if we're an Offline AU type, it will set this flag on completion of its processing
		if (renderFlags & kAudioOfflineUnitRenderAction_Complete)
		{
			// we now need to calculate how many frames we rendered.
			// as we're dealing with PCM non-interleaved buffers, we can calculate the numFrames simply
			ioNumFrames = ioData->mBuffers[0].mDataByteSize / sizeof(Float32);
			*outOLCompleted = true;
			*outOLRequiresPostProcess = false;
			mUnit.GlobalReset (); //flush this out, as we're done with this phase
		}
		else
		{
			if (*outOLCompleted) {
				if (*outOLRequiresPostProcess)
					// prepare for the tail-flush phase: feed silence on subsequent renders
					result = SetInputCallback (mUnit, sSilentCallback);
				else
					mUnit.GlobalReset (); //flush this out, as we're done with this phase
			}
		}

		return result;
	}

	// rendering in a RT context:
	AudioUnitRenderActionFlags renderFlags = 0;
	OSStatus result = mUnit.Render (&renderFlags, &mRenderTimeStamp, 0, ioNumFrames, ioData);
	if (result) {
		printf("F result %d\n", (int)result);
	}
	if (!result) {
		// only advance our sample-time clock on a successful render
		mRenderTimeStamp.mSampleTime += ioNumFrames;
		outIsSilence = (renderFlags & kAudioUnitRenderAction_OutputIsSilence);
	}

	// (disabled debug dump of the rendered samples was here)

	return result;
}
// (Re)configure and initialize the hosted AU for processing.
//
// inInputFormat / inOutputFormat — stream formats applied to input and output element 0.
// inNumInputSamples — total input samples that will be processed; must be non-zero
//                     for an Offline AU type (it needs its input size up front).
// inMaxFrames — maximum frames per render slice.
//
// Also resets the render time stamp (sample count only is valid), clears the
// preflight state, and (re)allocates the preflight buffer list for the output format.
//
// Returns noErr on success, or the first configuration/initialization error.
OSStatus CAAUProcessor::DoInitialisation (const CAStreamBasicDescription 	&inInputFormat,
										const CAStreamBasicDescription 		&inOutputFormat,
										UInt64								inNumInputSamples,
										UInt32 								inMaxFrames)
{
	OSStatus result = noErr;	// initialized so the 'home:' exit can never return an indeterminate value

	// an Offline AU type must be told how much input it will get; zero is invalid
	if (inNumInputSamples == 0 && IsOfflineAU())
		return kAudioUnitErr_InvalidOfflineRender;

	mNumInputSamples = inNumInputSamples;

	// first check that we can do this number of channels
	if (mUnit.CanDo (inInputFormat.NumberChannels(), inOutputFormat.NumberChannels()) == false)
		ca_require_noerr (result = kAudioUnitErr_FailedInitialization, home);

	// just uninitialise the AU as a matter of course
	ca_require_noerr (result = mUnit.Uninitialize(), home);

	ca_require_noerr (result = mUnit.SetFormat (kAudioUnitScope_Input, 0, inInputFormat), home);
	ca_require_noerr (result = mUnit.SetFormat (kAudioUnitScope_Output, 0, inOutputFormat), home);
	ca_require_noerr (result = SetMaxFramesPerRender (inMaxFrames), home);

#if !TARGET_OS_IPHONE
	// if we're any AU but an offline AU, we should tell it that we're processing offline
	if (!IsOfflineAU()) {
		UInt32 isOffline = (IsOfflineContext() ? 1 : 0);
		// don't care whether this succeeds or fails as many AU's don't care about this
		// but the ones that do, it's important that they are told their render context
		mUnit.SetProperty (kAudioUnitProperty_OfflineRender, kAudioUnitScope_Global, 0, &isOffline, sizeof(isOffline));
	} else {
		// tell the offline unit how many input samples we wish to process...
		mUnit.SetProperty (kAudioUnitOfflineProperty_InputSize, kAudioUnitScope_Global, 0, &mNumInputSamples, sizeof(mNumInputSamples));
	}
#endif

	ca_require_noerr (result = mUnit.Initialize(), home);

	ca_require_noerr (result = SetInputCallback (mUnit, mUserCallback), home);

	// finally reset our time stamp
	// the time stamp we use with the AU Render - only sample count is valid
	memset (&mRenderTimeStamp, 0, sizeof(mRenderTimeStamp));
	mRenderTimeStamp.mFlags = kAudioTimeStampSampleTimeValid;

	// now, if we're NOT an offline AU, preflighting is not required
	// if we are an offline AU, we should preflight.. an offline AU will tell us when its preflighting is done
	mPreflightDone = false;

	// (re)allocate the buffer list used to hold preflight output
	if (mPreflightABL) {
		delete mPreflightABL;
		mPreflightABL = NULL;
	}
	mPreflightABL = new AUOutputBL (inOutputFormat);

	mLastPercentReported = 0;

home:
	return result;
}
// Preflight the processor before rendering.
//
// For a regular (non-Offline-type) AU this either consumes the unit's latency
// samples (offline context) or tail samples (real-time context, when
// inProcessPreceedingTail is true) through the user's input callback, or simply
// pre-rolls the unit. For an Offline AU type, it runs the unit's own preflight
// phase when the unit reports preflighting as optional or required.
//
// On success, sets mPreflightDone so Render() is permitted in an offline context.
// Returns noErr on success, or the first render/property error encountered.
OSStatus CAAUProcessor::Preflight (bool inProcessPreceedingTail)
{
	// we're preflighting again, so reset ourselves
	if (mPreflightDone) {
		mPreflightDone = false;
		// the time stamp we use with the AU Render - only sample count is valid
		memset (&mRenderTimeStamp, 0, sizeof(mRenderTimeStamp));
		mRenderTimeStamp.mFlags = kAudioTimeStampSampleTimeValid;
		mUnit.GlobalReset();
	}

	Float64 sampleRate;
	OSStatus result = mUnit.GetSampleRate (kAudioUnitScope_Output, 0, sampleRate);
	CalculateRemainderSamples (sampleRate);

	UInt32 numFrames = MaxFramesPerRender();
	if (numFrames == 0)
		return kAudioUnitErr_InvalidProperty;

	if (!IsOfflineAU())
	{
		if ((IsOfflineContext() == false && inProcessPreceedingTail) || IsOfflineContext())
		{
			// re-establish the user's input callback
			ca_require_noerr (result = SetInputCallback (mUnit, mUserCallback), home);

			// Consume the number of input samples indicated by the AU's latency or tail
			// based on whether the AU is being used in an offline context or not.
			UInt32 latSamps = IsOfflineContext() ? mLatencySamples : mTailSamples;
			// BUG FIX: a stray debug statement previously zeroed latSamps here,
			// which skipped this priming loop entirely so latency/tail samples
			// were never consumed before rendering.
			while (latSamps > 0)
			{
				// last pass may be a partial slice
				if (latSamps < numFrames)
					numFrames = latSamps;

				// process the samples (the unit's input callback will read the samples
				// from the file and convert them to float for processing)
				AudioUnitRenderActionFlags renderFlags = 0;
				mPreflightABL->Prepare();
				result = mUnit.Render (&renderFlags, &mRenderTimeStamp, 0, numFrames, mPreflightABL->ABL());
				if (result) {
					printf("B result %d\n", (int)result);
					goto home;
				}
				mRenderTimeStamp.mSampleTime += numFrames;
				latSamps -= numFrames;
			}

			// offline: start the real render clock past the consumed latency
			if (IsOfflineContext())
				mRenderTimeStamp.mSampleTime = mLatencySamples;
		}
		else
		{
			// processing real-time but not processing preceeding tail, so we should preroll the AU
			ca_require_noerr (result = Preroll(mUnit, numFrames), home);

			// re-establish the user's input callback
			ca_require_noerr (result = SetInputCallback (mUnit, mUserCallback), home);

			mRenderTimeStamp.mSampleTime = 0;
		}
	}
#if !TARGET_OS_IPHONE
	else
	{
		// re-establish the user's input callback
		ca_require_noerr (result = SetInputCallback (mUnit, mUserCallback), home);

		UInt32 preflightRequirements;
		UInt32 size;
		size = sizeof(preflightRequirements);
		ca_require_noerr (result = mUnit.GetProperty (kAudioUnitOfflineProperty_PreflightRequirements,
												kAudioUnitScope_Global, 0,
												&preflightRequirements, &size), home);

		// 0 indicates none, otherwise optional or required -> we do it for either
		if (preflightRequirements)
		{
			for (;;) {
				// here we need to do the preflight loop - we don't expect any data back, but have to
				// give the offline unit all of its input data to allow it to prepare its processing
				AudioUnitRenderActionFlags renderFlags = kAudioOfflineUnitRenderAction_Preflight;
				mPreflightABL->Prepare();
				result = mUnit.Render (&renderFlags, &mRenderTimeStamp, 0, numFrames, mPreflightABL->ABL());
				if (result) {
					printf("C result %d\n", (int)result);
					goto home;
				}
				mRenderTimeStamp.mSampleTime += numFrames;

				// the unit tells us when its preflight phase is complete
				if (renderFlags & kAudioOfflineUnitRenderAction_Complete)
					break;
			}
		}

		// the time stamp we use with the AU Render - only sample count is valid
		mRenderTimeStamp.mSampleTime = 0;
	}
#endif

	if (result == noErr) {
		mPreflightDone = true;
	}

home:
	return result;
}