Example #1
ComponentResult	AUInlineEffectBase::RenderBus(AudioUnitRenderActionFlags & flags,
												const AudioTimeStamp & timestamp, UInt32 bus, UInt32 frames)
{
	// an inline effect only services bus 0, and only when something is connected to it
	if ((bus != 0) || (! HasInput(0))) {
		return kAudioUnitErr_NoConnection;
	}

	ComponentResult	result;
	AUInputElement *theInput = GetInput(0);
	result = theInput->PullInput(flags, timestamp, 0, frames);
	if (result == noErr) {
		AudioBufferList &	inputbuffers = theInput->GetBufferList();
		// process in place: the input buffers double as the output buffers
		if (! IsBypassEffect()) {
			result = ProcessBufferLists(flags, inputbuffers, inputbuffers, frames);
		}
		// hand the (possibly processed) input buffers straight to the output element
		GetOutput(0)->SetBufferList(inputbuffers);
	}

	return result;
}
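The examples in this listing all funnel into ProcessBufferLists(), which concrete effects override to do their actual DSP. Below is a minimal sketch of such an override for a hypothetical fixed-gain effect; the class name and gain value are illustrative only, and it assumes AUEffectBase.h from Apple's legacy CoreAudio Utility Classes is on the include path (older SDK versions take a ComponentInstance in the constructor instead).

#include <AudioToolbox/AudioToolbox.h>
#include "AUEffectBase.h"	// legacy CoreAudio Utility Classes (assumed available locally)

// HypotheticalGainEffect is illustrative, not part of the SDK.
class HypotheticalGainEffect : public AUEffectBase {
public:
	HypotheticalGainEffect(AudioComponentInstance inInstance) : AUEffectBase(inInstance) {}

	OSStatus ProcessBufferLists(AudioUnitRenderActionFlags &ioActionFlags,
								const AudioBufferList &inBuffer,
								AudioBufferList &outBuffer,
								UInt32 inFramesToProcess) override
	{
		const float gain = 0.5f;	// fixed value purely for illustration
		for (UInt32 buf = 0; buf < inBuffer.mNumberBuffers; ++buf) {
			const float *src = static_cast<const float *>(inBuffer.mBuffers[buf].mData);
			float *dst = static_cast<float *>(outBuffer.mBuffers[buf].mData);
			const UInt32 count = inBuffer.mBuffers[buf].mDataByteSize / sizeof(float);
			// src and dst may alias: RenderBus() above passes the same list twice
			for (UInt32 n = 0; n < count; ++n)
				dst[n] = src[n] * gain;
		}
		return noErr;
	}
};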
Example #2
// ____________________________________________________________________________
//
//	This method is called (potentially repeatedly) by ProcessForScheduledParams()
//	in order to perform the actual DSP required for this portion of the entire buffer
//	being processed.  The entire buffer can be divided up into smaller "slices"
//	according to the timestamps on the scheduled parameters...
//
OSStatus		AUEffectBase::ProcessScheduledSlice(	void				*inUserData,
														UInt32				inStartFrameInBuffer,
														UInt32				inSliceFramesToProcess,
														UInt32				inTotalBufferFrames )
{
	ScheduledProcessParams	&sliceParams = *((ScheduledProcessParams*)inUserData);

	AudioUnitRenderActionFlags 	&actionFlags = *sliceParams.actionFlags;
	AudioBufferList 			&inputBufferList = *sliceParams.inputBufferList;
	AudioBufferList 			&outputBufferList = *sliceParams.outputBufferList;

	UInt32 channelSize = inSliceFramesToProcess * mBytesPerFrame;
		// fix the size of the buffer we're operating on before we render this slice of time
	for(unsigned int i = 0; i < inputBufferList.mNumberBuffers; i++ ) {
		inputBufferList.mBuffers[i].mDataByteSize = inputBufferList.mBuffers[i].mNumberChannels * channelSize;
	}

	for(unsigned int i = 0; i < outputBufferList.mNumberBuffers; i++ ) {
		outputBufferList.mBuffers[i].mDataByteSize = outputBufferList.mBuffers[i].mNumberChannels * channelSize;
	}
		// process the buffer
	OSStatus result = ProcessBufferLists(actionFlags, inputBufferList, outputBufferList, inSliceFramesToProcess );

		// we just partially processed the buffers, so increment the data pointers to the next part of the buffer to process
	for(unsigned int i = 0; i < inputBufferList.mNumberBuffers; i++ ) {
		inputBufferList.mBuffers[i].mData =
			(char *)inputBufferList.mBuffers[i].mData + inputBufferList.mBuffers[i].mNumberChannels * channelSize;
	}

	for(unsigned int i = 0; i < outputBufferList.mNumberBuffers; i++ ) {
		outputBufferList.mBuffers[i].mData =
			(char *)outputBufferList.mBuffers[i].mData + outputBufferList.mBuffers[i].mNumberChannels * channelSize;
	}

	return result;
}
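To make the division of labor clearer, here is a simplified, self-contained sketch of the slicing loop that a driver like ProcessForScheduledParams() performs. SketchParamEvent and ProcessInSlices are hypothetical stand-ins (the real SDK iterates AudioUnitParameterEvent records, including ramps), but the slice boundaries and the (start, sliceFrames, totalFrames) callback shape mirror ProcessScheduledSlice() above.

#include <vector>
#include <algorithm>
#include <cstdint>

// Hypothetical stand-in for a scheduled parameter change; the real SDK uses
// AudioUnitParameterEvent, which also carries ramp start/end values.
struct SketchParamEvent {
	uint32_t startFrame;	// buffer offset at which the new value takes effect
	float    value;
};

// Simplified driver: cut nFrames into slices at each event boundary and hand
// each slice to a callback, mirroring how ProcessForScheduledParams()
// repeatedly invokes ProcessScheduledSlice(). Events are assumed sorted by
// startFrame.
template <typename SliceFn>
void ProcessInSlices(const std::vector<SketchParamEvent> &events,
					 uint32_t nFrames, SliceFn processSlice)
{
	uint32_t sliceStart = 0;
	for (const SketchParamEvent &ev : events) {
		uint32_t boundary = std::min(ev.startFrame, nFrames);
		if (boundary > sliceStart) {
			processSlice(sliceStart, boundary - sliceStart, nFrames);
			sliceStart = boundary;
		}
		// a real effect would now apply ev.value before processing the next slice
	}
	if (sliceStart < nFrames)	// tail after the last scheduled event
		processSlice(sliceStart, nFrames - sliceStart, nFrames);
}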
Example #3
OSStatus 	AUEffectBase::Render(	AudioUnitRenderActionFlags &ioActionFlags,
											const AudioTimeStamp &		inTimeStamp,
											UInt32						nFrames)
{
	if (!HasInput(0))
		return kAudioUnitErr_NoConnection;

	OSStatus result = noErr;

	result = mMainInput->PullInput(ioActionFlags, inTimeStamp, 0 /* element */, nFrames);

	if (result == noErr)
	{
		if(ProcessesInPlace() && mMainOutput->WillAllocateBuffer())
		{
			mMainOutput->SetBufferList(mMainInput->GetBufferList() );
		}

		if (ShouldBypassEffect())
		{
			// leave silence bit alone

			if(!ProcessesInPlace() )
			{
				mMainInput->CopyBufferContentsTo (mMainOutput->GetBufferList());
			}
		}
		else
		{
			if(mParamList.size() == 0 )
			{
				// this will read/write silence bit
				result = ProcessBufferLists(ioActionFlags, mMainInput->GetBufferList(), mMainOutput->GetBufferList(), nFrames);
			}
			else
			{
				// deal with scheduled parameters...

				AudioBufferList &inputBufferList = mMainInput->GetBufferList();
				AudioBufferList &outputBufferList = mMainOutput->GetBufferList();

				ScheduledProcessParams processParams;
				processParams.actionFlags = &ioActionFlags;
				processParams.inputBufferList = &inputBufferList;
				processParams.outputBufferList = &outputBufferList;

				// divide up the buffer into slices according to scheduled params then
				// do the DSP for each slice (ProcessScheduledSlice() called for each slice)
				result = ProcessForScheduledParams(	mParamList,
													nFrames,
													&processParams );


				// fixup the buffer pointers to how they were before we started
				UInt32 channelSize = nFrames * mBytesPerFrame;
				for(unsigned int i = 0; i < inputBufferList.mNumberBuffers; i++ ) {
					UInt32 size = inputBufferList.mBuffers[i].mNumberChannels * channelSize;
					inputBufferList.mBuffers[i].mData = (char *)inputBufferList.mBuffers[i].mData - size;
					inputBufferList.mBuffers[i].mDataByteSize = size;
				}

				for(unsigned int i = 0; i < outputBufferList.mNumberBuffers; i++ ) {
					UInt32 size = outputBufferList.mBuffers[i].mNumberChannels * channelSize;
					outputBufferList.mBuffers[i].mData = (char *)outputBufferList.mBuffers[i].mData - size;
					outputBufferList.mBuffers[i].mDataByteSize = size;
				}
			}
		}

		if ( (ioActionFlags & kAudioUnitRenderAction_OutputIsSilence) && !ProcessesInPlace() )
		{
			AUBufferList::ZeroBuffer(mMainOutput->GetBufferList() );
		}
	}

	return result;
}
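A note on the silence flag that Render() consults at the end: PullInput() can set kAudioUnitRenderAction_OutputIsSilence on the action flags, and a ProcessBufferLists() override may honor it to skip work. Here is a hedged sketch of that convention; the function name and the copy-through "DSP" are illustrative, not SDK code.

#include <AudioToolbox/AudioToolbox.h>
#include <cstring>

// SketchProcess follows the silence-flag convention, nothing more.
static OSStatus SketchProcess(AudioUnitRenderActionFlags &ioActionFlags,
							  const AudioBufferList &inBuffer,
							  AudioBufferList &outBuffer,
							  UInt32 /*inFramesToProcess*/)
{
	if (ioActionFlags & kAudioUnitRenderAction_OutputIsSilence) {
		// silent input and (by assumption) no reverb-style tail: skip the DSP
		// and leave the flag set; Render() above zeroes the output when needed
		return noErr;
	}
	for (UInt32 i = 0; i < outBuffer.mNumberBuffers; ++i) {
		// stand-in for real DSP: plain copy-through when not processing in place
		if (outBuffer.mBuffers[i].mData != inBuffer.mBuffers[i].mData)
			std::memcpy(outBuffer.mBuffers[i].mData,
						inBuffer.mBuffers[i].mData,
						inBuffer.mBuffers[i].mDataByteSize);
	}
	return noErr;
}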
Example #4
ComponentResult	AUInlineEffectBase::Process(AudioUnitRenderActionFlags & flags, AudioBufferList & buffer, UInt32 frames)
{
	// inline effects always process in place: the same buffer list is both source and destination
	return ProcessBufferLists(flags, buffer, buffer, frames);
}
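Because Process() passes the same AudioBufferList as both source and destination, any per-sample algorithm used by an inline effect must read each input sample before overwriting it. A small illustrative helper that is safe under that aliasing (the name and the soft-clip curve are assumptions, not SDK code):

#include <AudioToolbox/AudioToolbox.h>
#include <cmath>

// SoftClipInPlace reads each sample before writing it back, so it stays
// correct when, as in Process() above, source and destination are the very
// same buffers.
static void SoftClipInPlace(AudioBufferList &buffer)
{
	for (UInt32 i = 0; i < buffer.mNumberBuffers; ++i) {
		float *samples = static_cast<float *>(buffer.mBuffers[i].mData);
		const UInt32 count = buffer.mBuffers[i].mDataByteSize / sizeof(float);
		for (UInt32 n = 0; n < count; ++n) {
			float x = samples[n];	// read first...
			samples[n] = x / (1.0f + std::fabs(x));	// ...then overwrite
		}
	}
}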