Example #1
File: oalOSX.cpp Project: Aye1/RVProject
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
UInt32	GetOALFormatFromASBD(CAStreamBasicDescription	&inASBD)
{
	switch (inASBD.mFormatID)
	{
		case kAudioFormatLinearPCM:
			// NOTE: if float: return 0;
			if (inASBD.mFormatFlags & kAudioFormatFlagIsFloat)  
			{
				return (0);		// float currently unsupported
			}
			else
			{
				if (inASBD.NumberChannels() == 1 && inASBD.mBitsPerChannel == 16)
					return AL_FORMAT_MONO16;
				else if (inASBD.NumberChannels() == 2 && inASBD.mBitsPerChannel == 16)
					return AL_FORMAT_STEREO16;
				else if (inASBD.NumberChannels() == 1 && inASBD.mBitsPerChannel == 8)
					return AL_FORMAT_MONO8;
				else if (inASBD.NumberChannels() == 2 && inASBD.mBitsPerChannel == 8)
					return AL_FORMAT_STEREO8;
			}
			break;
		
		default:
			return (0);
			break;
	}
	return (0);
}
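As a quick illustration, here is a minimal calling sketch (not from the project above; it assumes the CoreAudio Public Utility header CAStreamBasicDescription.h and OpenAL's al.h are on the include path, and that GetOALFormatFromASBD above is linked in) showing how the helper maps an ASBD to an OpenAL format constant:

#include <cstdio>
#include <OpenAL/al.h>
#include "CAStreamBasicDescription.h"

UInt32 GetOALFormatFromASBD(CAStreamBasicDescription &inASBD);	// defined above

int main()
{
	// describe 44.1 kHz, 16-bit, stereo, interleaved linear PCM
	CAStreamBasicDescription asbd;
	asbd.mSampleRate       = 44100.0;
	asbd.mFormatID         = kAudioFormatLinearPCM;
	asbd.mFormatFlags      = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
	asbd.mChannelsPerFrame = 2;
	asbd.mBitsPerChannel   = 16;
	asbd.mFramesPerPacket  = 1;
	asbd.mBytesPerFrame    = 4;	// 2 channels x 2 bytes
	asbd.mBytesPerPacket   = 4;

	UInt32 oalFormat = GetOALFormatFromASBD(asbd);
	printf("%s\n", oalFormat == AL_FORMAT_STEREO16 ? "AL_FORMAT_STEREO16" : "unsupported");
	return 0;
}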
Example #2
File: karoke.cpp Project: slagyr/8LU-DSP
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//	karoke::karoke
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
karoke::karoke(AudioUnit component)
	: AUEffectBase(component, false)
{
	CreateElements();
	CAStreamBasicDescription streamDescIn;
	streamDescIn.SetCanonical(NUM_INPUTS, false);	// number of input channels
	streamDescIn.mSampleRate = GetSampleRate();
	
	CAStreamBasicDescription streamDescOut;
	streamDescOut.SetCanonical(NUM_OUTPUTS, false);	// number of output channels
	streamDescOut.mSampleRate = GetSampleRate();
	
	Inputs().GetIOElement(0)->SetStreamFormat(streamDescIn);
	Outputs().GetIOElement(0)->SetStreamFormat(streamDescOut);
	
	Globals()->UseIndexedParameters(kNumberOfParameters);
	SetParameter(kParam_One, kDefaultValue_ParamOne );
        
#if AU_DEBUG_DISPATCHER
	mDebugDispatcher = new AUDebugDispatcher (this);
#endif
	
	mLeftFilter = new FirFilter(200);
	mLeftFilter->setCoeffecients(lp_200, 200);
	mRightFilter = new FirFilter(200);
	mRightFilter->setCoeffecients(lp_200, 200);
}
Example #3
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//	AUPulseDetector::AUPulseDetector
//
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
AUPulseDetector::AUPulseDetector(AudioUnit component)
	: AUEffectBase(component),
	  mChildObject(NULL)
{
	CreateElements();
	
	CAStreamBasicDescription monoDesc;
	monoDesc.SetAUCanonical (1, false);
	monoDesc.mSampleRate = 44100.;
	
	GetOutput(0)->SetStreamFormat(monoDesc);
	GetInput(0)->SetStreamFormat(monoDesc);

	Globals()->UseIndexedParameters (5);
	Globals()->SetParameter (kPulseThreshold, kPulseThresholdDefault);
	Globals()->SetParameter (kPulseLength, kPulseLengthDefault);
	Globals()->SetParameter (kPulseRestTime, kPulseRestTimeDefault);
	Globals()->SetParameter (kDoPulseDetection, kDoPulseDetectionDefault);
	Globals()->SetParameter (kWritePulseStats, 0);

	mPulseTimeStats = new PulseTS[kPulseTSSize];
	
#if AU_DEBUG_DISPATCHER
	mDebugDispatcher = new AUDebugDispatcher (this);
#endif
}
Example #4
File: CAAudioUnit.cpp Project: 63n/ardour
OSStatus	CAAudioUnit::ConfigureDynamicScope (AudioUnitScope 		inScope,
											UInt32 					inNumElements,
											UInt32 					*inChannelsPerElement,
											Float64 				inSampleRate)
{
	SInt32 numChannels = 0;
	bool isDynamic = HasDynamicScope (inScope, numChannels);
	if (isDynamic == false)
		return kAudioUnitErr_InvalidProperty;

	// let's do a sanity check...
	// if numChannels == -1, then it can do "any"...
	if (numChannels > 0) {
		SInt32 count = 0;
		for (unsigned int i = 0; i < inNumElements; ++i)
			count += inChannelsPerElement[i];
		if (count > numChannels)
			return kAudioUnitErr_InvalidPropertyValue;
	}

	OSStatus result = SetElementCount (inScope, inNumElements);
	if (result)
		return result;

	CAStreamBasicDescription desc;
	desc.mSampleRate = inSampleRate;
	for (unsigned int i = 0; i < inNumElements; ++i) {
		desc.SetCanonical (inChannelsPerElement[i], false);
		result = SetFormat (inScope, i, desc);
		if (result)
			return result;
	}
	return noErr;
}
Example #5
File: CAAudioUnit.cpp Project: 63n/ardour
OSStatus	CAAudioUnit::SetNumberChannels (AudioUnitScope	inScope,
										AudioUnitElement	inEl,
										UInt32				inChans)
{
	// fetch the current format on this scope/element, change its
	// channel count, and set it back
	CAStreamBasicDescription desc;
	OSStatus result = GetFormat (inScope, inEl, desc);
	if (result) return result;
	desc.SetCanonical (inChans, desc.IsInterleaved());
	result = SetFormat (inScope, inEl, desc);
	return result;
}
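A hedged usage sketch (MakeOutputStereo is an illustrative helper, not part of CAAudioUnit; it assumes au is a valid, already-opened CAAudioUnit): re-channel element 0 of the output scope to stereo.

#include <cstdio>
#include "CAAudioUnit.h"

// illustrative helper: force element 0 of the output scope to 2 channels
OSStatus MakeOutputStereo(CAAudioUnit &au)
{
	OSStatus err = au.SetNumberChannels(kAudioUnitScope_Output, 0, 2);
	if (err != noErr)
		fprintf(stderr, "SetNumberChannels failed: %d\n", (int)err);
	return err;
}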
Example #6
Minim::AudioFormat::AudioFormat( float sampleRate, int numberOfChannels )
{
	CAStreamBasicDescription streamDesc;
	streamDesc.mSampleRate = 44100.0f;	// note: the sampleRate argument is ignored here
	streamDesc.SetAUCanonical( numberOfChannels, true );
	mChannels = streamDesc.mChannelsPerFrame;
	mSampleRate = streamDesc.mSampleRate;
	mFrameRate = streamDesc.mSampleRate;
	mFrameSize = streamDesc.mBytesPerFrame;
	mSampleSizeInBits = streamDesc.mBitsPerChannel;
	mBigEndian = (streamDesc.mFormatFlags & kLinearPCMFormatFlagIsBigEndian);
}
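Note that SetAUCanonical() fills in different field values per platform (Float32 samples on the desktop, 8.24 fixed point on older iOS SDKs, as the comments in Example #19 below also note). A small sketch, assuming the CoreAudio Public Utility headers are available, that just prints what was filled in:

#include <cstdio>
#include "CAStreamBasicDescription.h"

int main()
{
	CAStreamBasicDescription desc;
	desc.SetAUCanonical(2, true);	// 2 channels, interleaved
	desc.mSampleRate = 44100.0;
	// the printed sizes depend on the platform's canonical AU sample type
	printf("bits/channel: %u, bytes/frame: %u, channels: %u\n",
		(unsigned)desc.mBitsPerChannel, (unsigned)desc.mBytesPerFrame,
		(unsigned)desc.mChannelsPerFrame);
	return 0;
}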
Example #7
File: AUBuffer.cpp Project: 63n/ardour
void				AUBufferList::Allocate(const CAStreamBasicDescription &format, UInt32 nFrames)
{
	UInt32 nStreams;
	if (format.IsInterleaved()) {
		nStreams = 1;
	} else {
		nStreams = format.mChannelsPerFrame;
	}

	// careful -- the I/O thread could be running!
	if (nStreams > mAllocatedStreams) {
		size_t theHeaderSize = sizeof(AudioBufferList) - sizeof(AudioBuffer);
		mPtrs = (AudioBufferList *)CA_realloc(mPtrs,
									SafeMultiplyAddUInt32(nStreams, sizeof(AudioBuffer), theHeaderSize));
		mAllocatedStreams = nStreams;
	}
	UInt32 bytesPerStream = SafeMultiplyAddUInt32(nFrames, format.mBytesPerFrame, 0xF) & ~0xF;
	UInt32 nBytes = SafeMultiplyAddUInt32(nStreams, bytesPerStream, 0);
	if (nBytes > mAllocatedBytes) {
		if (mExternalMemory) {
			mExternalMemory = false;
			mMemory = NULL;
		}
		mMemory = (Byte *)CA_realloc(mMemory, nBytes);
		mAllocatedBytes = nBytes;
	}
	mAllocatedFrames = nFrames;
	mPtrState = kPtrsInvalid;
}
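The expression SafeMultiplyAddUInt32(nFrames, format.mBytesPerFrame, 0xF) & ~0xF above computes nFrames * mBytesPerFrame with overflow checking and then rounds the result up to a multiple of 16. A standalone sketch of just the rounding arithmetic (RoundUpTo16 is illustrative, not from AUBuffer.cpp):

#include <cstdint>
#include <cstdio>

// round nBytes up to the next multiple of 16 (0xF masks the low four bits)
static uint32_t RoundUpTo16(uint32_t nBytes)
{
	return (nBytes + 0xF) & ~uint32_t(0xF);
}

int main()
{
	printf("%u\n", RoundUpTo16(512 * 4));	// 2048 bytes, already aligned: prints 2048
	printf("%u\n", RoundUpTo16(501 * 4));	// 2004 bytes, padded up: prints 2016
	return 0;
}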
Example #8
File: AUBuffer.cpp Project: EQ4/JamomaMax
void				AUBufferList::Allocate(const CAStreamBasicDescription &format, UInt32 nFrames)
{
	UInt32 nStreams;
	UInt32 channelsPerStream;
	if (format.IsInterleaved()) {
		nStreams = 1;
		channelsPerStream = format.mChannelsPerFrame;
	} else {
		nStreams = format.mChannelsPerFrame;
		channelsPerStream = 1;
	}

	// careful -- the I/O thread could be running!
	if (nStreams > mAllocatedStreams) {
		mPtrs = (AudioBufferList *)CA_realloc(mPtrs, offsetof(AudioBufferList, mBuffers) + nStreams * sizeof(AudioBuffer));
		mAllocatedStreams = nStreams;
	}
	UInt32 bytesPerStream = (nFrames * format.mBytesPerFrame + 0xF) & ~0xF;
	UInt32 nBytes = nStreams * bytesPerStream;	
	if (nBytes > mAllocatedBytes) {
		if (mExternalMemory) {
			mExternalMemory = false;
			mMemory = NULL;
		}
		mMemory = (Byte *)CA_realloc(mMemory, nBytes);
		mAllocatedBytes = nBytes;
	}
	mAllocatedFrames = nFrames;
	mPtrState = kPtrsInvalid;
}
Example #9
void	SonogramViewDemo::AllocateBuffers()
{
	mBlockSize = 1024;
	mNumBins = mBlockSize>>1;

	if (mSpectrumBuffer) {
		// delete calls deallocate
		delete (mSpectrumBuffer);
	}
	mSpectrumBuffer = new CARingBuffer();
	mSpectrumBuffer->Allocate(GetNumberOfChannels(), mNumBins*sizeof(Float32), kMaxSonogramLatency);

	CAStreamBasicDescription	bufClientDesc;		
	bufClientDesc.SetCanonical(GetNumberOfChannels(), false);
	bufClientDesc.mSampleRate = GetSampleRate();

	UInt32 frameLength = kDefaultValue_BufferSize*sizeof(Float32);
	
	if (mFetchingBufferList) {		
		mFetchingBufferList->DeallocateBuffers();
		delete(mFetchingBufferList);
	}
	mFetchingBufferList = CABufferList::New("fetch buffer", bufClientDesc );
	mFetchingBufferList->AllocateBuffers(frameLength);
	
	if (mSpectralDataBufferList) {
		mSpectralDataBufferList->DeallocateBuffers();
		delete(mSpectralDataBufferList);
	}
	mSpectralDataBufferList = CABufferList::New("temp buffer", bufClientDesc );
	mSpectralDataBufferList->AllocateBuffers(frameLength);

	memset (&mRenderStamp, 0, sizeof(AudioTimeStamp));
	mRenderStamp.mFlags = kAudioTimeStampSampleTimeValid;	
	
	
	mSpectralProcessor.free();
	mSpectralProcessor = new CASpectralProcessor(mBlockSize, mNumBins, GetNumberOfChannels(), GetMaxFramesPerSlice());
	
	if (mMinAmp) free(mMinAmp);
	mMinAmp = (Float32*) calloc(GetNumberOfChannels(), sizeof(Float32));
	
	if (mMaxAmp) free(mMaxAmp);
	mMaxAmp = (Float32*) calloc(GetNumberOfChannels(), sizeof(Float32));
}
Example #10
/*! @method ChangeStreamFormat */
OSStatus			AUPannerBase::ChangeStreamFormat (
									AudioUnitScope						inScope,
									AudioUnitElement					inElement,
									const CAStreamBasicDescription & 	inPrevFormat,
									const CAStreamBasicDescription &	inNewFormat)
{
	if (inScope == kAudioUnitScope_Input && !InputChannelConfigIsSupported(inNewFormat.NumberChannels())) 
		return kAudioUnitErr_FormatNotSupported;
		
	if (inScope == kAudioUnitScope_Output && !OutputChannelConfigIsSupported(inNewFormat.NumberChannels())) 
		return kAudioUnitErr_FormatNotSupported;
		
	if (inNewFormat.NumberChannels() != inPrevFormat.NumberChannels())
		RemoveAudioChannelLayout(inScope, inElement);
		
	return AUBase::ChangeStreamFormat(inScope, inElement, inPrevFormat, inNewFormat);
}
Example #11
void	CAAudioFileWriter::SetFile(const FSRef &parentDir, CFStringRef filename, AudioFileTypeID filetype, const CAStreamBasicDescription &dataFormat, const CAAudioChannelLayout *layout)
{
	Stop();
	CancelAndDisposeBuffers();
	
	delete mFile;   mFile = NULL;
	mFile = new CAAudioFile;
	mFile->CreateNew(parentDir, filename, filetype, dataFormat, layout ? &layout->Layout() : NULL);
	
	const CAStreamBasicDescription &fileFmt = mFile->GetFileDataFormat();
	CAStreamBasicDescription iofmt;
	iofmt.SetCanonical(fileFmt.mChannelsPerFrame, false);	// deinterleaved
	iofmt.mSampleRate = fileFmt.mSampleRate;
	mFile->SetClientFormat(iofmt, NULL);
	
	SetFormat(iofmt);
}
Example #12
void	CAAudioFileReader::SetFile(const FSRef &inFile)
{
	Stop();
	CancelAndDisposeBuffers();
	
	delete mFile;   mFile = NULL;
	mFile = new CAAudioFile;
	mFile->Open(inFile);
	
	const CAStreamBasicDescription &fileFmt = mFile->GetFileDataFormat();
	CAStreamBasicDescription iofmt;
	iofmt.SetCanonical(fileFmt.mChannelsPerFrame, false);	// deinterleaved
	iofmt.mSampleRate = fileFmt.mSampleRate;
	mFile->SetClientFormat(iofmt, NULL);
	
	SetFormat(iofmt);
}
Example #13
void	ZKMORFileWriter::CreateFile(const FSRef &parentDir, CFStringRef filename, AudioFileTypeID filetype, const CAStreamBasicDescription &dataFormat, const CAAudioChannelLayout *layout)
{
	RemoveFromWorkerThread();
	
	FlushAndClose();
	DisposeBuffers();
	
	delete mFile;   mFile = NULL;
	mFile = new CAAudioFile;
	mFile->CreateNew(parentDir, filename, filetype, dataFormat, layout ? &layout->Layout() : NULL);
	
	const CAStreamBasicDescription &fileFmt = mFile->GetFileDataFormat();
	CAStreamBasicDescription iofmt;
	iofmt.SetCanonical(fileFmt.mChannelsPerFrame, false);	// deinterleaved
	iofmt.mSampleRate = fileFmt.mSampleRate;
	SetClientDataFormat(iofmt);
	AddToWorkerThread();
}
Example #14
ComponentResult	ElCAJAS::ChangeStreamFormat(AudioUnitScope inScope,
        AudioUnitElement inElement,
        const CAStreamBasicDescription& inPrevFormat,
        const CAStreamBasicDescription& inNewFormat)
{
    if (inScope == kAudioUnitScope_Input) {		// scope 1
        int reqChans = inNewFormat.NumberChannels();
        if (reqChans > 2 || reqChans < 1)
            return kAudioUnitErr_FormatNotSupported;
        else
            return noErr;
    } else if (inScope == kAudioUnitScope_Output) {	// scope 2
        int reqChans = inNewFormat.NumberChannels();
        if (reqChans != 2)
            return kAudioUnitErr_FormatNotSupported;
        else
            return noErr;
    }
    return kAudioUnitErr_FormatNotSupported;
}
Example #15
void	ZKMORFileReader::SetFile(const FSRef &inFile)
{
	RemoveFromWorkerThread();
	DisposeBuffers();
	mIsPrimed = false;
	
	delete mFile;   mFile = NULL;
	mFile = new CAAudioFile;
	mFile->Open(inFile);

	mNumberOfFrames = mFile->GetNumberFrames();
	
	const CAStreamBasicDescription &fileFmt = mFile->GetFileDataFormat();
	CAStreamBasicDescription iofmt;
	iofmt.SetCanonical(fileFmt.mChannelsPerFrame, false);	// deinterleaved
	iofmt.mSampleRate = fileFmt.mSampleRate;
	SynchronousSetClientDataFormat(iofmt);
	AddToWorkerThread();
}
Example #16
void			WaveformViewDemo::AllocateBuffers()
{
	if (mAudioBuffer) delete (mAudioBuffer);
	mAudioBuffer = new CARingBuffer();
	mAudioBuffer->Allocate(GetNumberOfChannels(), sizeof(Float32), kDefaultValue_BufferSize); 
	// unlike the spectral buffers we write one number at a time, the spectral ones do entire analysis at a time

	CAStreamBasicDescription	bufClientDesc;		
	bufClientDesc.SetCanonical(GetNumberOfChannels(), false);
	bufClientDesc.mSampleRate = GetSampleRate();

	if (mFetchingBufferList) {
		mFetchingBufferList->DeallocateBuffers();
		delete(mFetchingBufferList);
	}
	mFetchingBufferList = CABufferList::New("fetch buffer", bufClientDesc );
	mFetchingBufferList->AllocateBuffers(sizeof(Float32) * kDefaultValue_BufferSize);

	memset (&mRenderStamp, 0, sizeof(AudioTimeStamp));
	mRenderStamp.mFlags = kAudioTimeStampSampleTimeValid;
}
Example #17
File: AUBuffer.cpp Project: EQ4/JamomaMax
// this should NOT be called while I/O is in process
void		AUBufferList::UseExternalBuffer(const CAStreamBasicDescription &format, const AudioUnitExternalBuffer &buf)
{
	UInt32 alignedSize = buf.size & ~0xF;
	if (mMemory != NULL && alignedSize >= mAllocatedBytes) {
		// only adopt the external buffer if we already allocated memory and the
		// new buffer is at least as large; if we never allocated, we don't need one
		Byte *oldMemory = mMemory;
		mMemory = buf.buffer;
		mAllocatedBytes = alignedSize;
		// from Allocate(): nBytes = nStreams * nFrames * format.mBytesPerFrame;	
		// thus: nFrames = nBytes / (nStreams * format.mBytesPerFrame)
		mAllocatedFrames = mAllocatedBytes / (format.NumberChannelStreams() * format.mBytesPerFrame);
		mExternalMemory = true;
		free(oldMemory);
	}
}
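The comment's inversion of nBytes = nStreams * nFrames * mBytesPerFrame is easy to sanity-check in isolation (all numbers below are made up for illustration):

#include <cstdint>
#include <cstdio>

int main()
{
	// e.g. two deinterleaved Float32 streams (4 bytes per frame each)
	// backed by a 64 KiB external buffer, aligned down to 16 bytes
	const uint32_t nStreams      = 2;
	const uint32_t bytesPerFrame = 4;
	const uint32_t alignedBytes  = 65536u & ~0xFu;

	// nFrames = nBytes / (nStreams * bytesPerFrame)
	const uint32_t nFrames = alignedBytes / (nStreams * bytesPerFrame);
	printf("frames: %u\n", nFrames);	// prints 8192
	return 0;
}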
Example #18
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//	AUInstrumentBase::ValidFormat
//
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
bool				AUInstrumentBase::ValidFormat(	AudioUnitScope					inScope,
													AudioUnitElement				inElement,
													const CAStreamBasicDescription  & inNewFormat)
{	
		// if the AU supports this, then we should just let this go through to the Init call
	if (SupportedNumChannels (NULL)) 
		return MusicDeviceBase::ValidFormat(inScope, inElement, inNewFormat);

	bool isGood = MusicDeviceBase::ValidFormat (inScope, inElement, inNewFormat);
	if (!isGood) return false;
	
		// if we get to here, then the basic criterion is that the
		// number of channels cannot change on an existing bus
	AUIOElement *el = GetIOElement (inScope, inElement);
	return (el->GetStreamFormat().NumberChannels() == inNewFormat.NumberChannels()); 
}
Example #19
int SetupRemoteIO (AudioUnit& inRemoteIOUnit, AURenderCallbackStruct inRenderProc, CAStreamBasicDescription& outFormat)
{	
	try {		
		// Open the output unit
		AudioComponentDescription desc;
		desc.componentType = kAudioUnitType_Output;
		desc.componentSubType = kAudioUnitSubType_RemoteIO;
		desc.componentManufacturer = kAudioUnitManufacturer_Apple;
		desc.componentFlags = 0;
		desc.componentFlagsMask = 0;
		
		AudioComponent comp = AudioComponentFindNext(NULL, &desc);
		
		XThrowIfError(AudioComponentInstanceNew(comp, &inRemoteIOUnit), "couldn't open the remote I/O unit");

		UInt32 one = 1;
		XThrowIfError(AudioUnitSetProperty(inRemoteIOUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &one, sizeof(one)), "couldn't enable input on the remote I/O unit");
	
		XThrowIfError(AudioUnitSetProperty(inRemoteIOUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &inRenderProc, sizeof(inRenderProc)), "couldn't set remote i/o render callback");
		
        // NEWL: Set the canonical format of the input/output system rather than the AudioUnits':
        //       LPCM, non-interleaved, 16-bit signed integer data.
        // outFormat.SetCanonical(2, false);
        
        // OLDL: set our required format - Canonical AU format: LPCM non-interleaved 8.24 fixed point
        outFormat.SetAUCanonical(2, false);
		outFormat.mSampleRate = SAMPRATE;
		XThrowIfError(AudioUnitSetProperty(inRemoteIOUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &outFormat, sizeof(outFormat)), "couldn't set the remote I/O unit's output client format");
		XThrowIfError(AudioUnitSetProperty(inRemoteIOUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &outFormat, sizeof(outFormat)), "couldn't set the remote I/O unit's input client format");

		XThrowIfError(AudioUnitInitialize(inRemoteIOUnit), "couldn't initialize the remote I/O unit");
	}
	catch (CAXException &e) {
		char buf[256];
		fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
		return 1;
	}
	catch (...) {
		fprintf(stderr, "An unknown error occurred\n");
		return 1;
	}	
	
	return 0;
}
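A hedged caller sketch for the function above (SilenceCallback and StartSilentIO are illustrative names, and error handling is omitted): wire up a render callback that just outputs silence, then start the unit.

#include <string.h>
#include <AudioToolbox/AudioToolbox.h>
#include "CAStreamBasicDescription.h"

int SetupRemoteIO(AudioUnit& inRemoteIOUnit, AURenderCallbackStruct inRenderProc, CAStreamBasicDescription& outFormat);	// defined above

static OSStatus SilenceCallback(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags,
								const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber,
								UInt32 inNumberFrames, AudioBufferList *ioData)
{
	// zero every output buffer and flag the result as silence
	for (UInt32 i = 0; i < ioData->mNumberBuffers; ++i)
		memset(ioData->mBuffers[i].mData, 0, ioData->mBuffers[i].mDataByteSize);
	*ioActionFlags |= kAudioUnitRenderAction_OutputIsSilence;
	return noErr;
}

void StartSilentIO()
{
	AudioUnit rioUnit = NULL;
	CAStreamBasicDescription ioFormat;
	AURenderCallbackStruct cb = { SilenceCallback, NULL };
	if (SetupRemoteIO(rioUnit, cb, ioFormat) == 0)
		AudioOutputUnitStart(rioUnit);	// begin rendering silence
}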
Example #20
// soundsource overrides
int SoundSourceCoreAudio::open() {
    //m_file.open(QIODevice::ReadOnly);

    //Open the audio file.
    OSStatus err;

	//QUrl blah(m_qFilename);
    QString qurlStr = m_qFilename;//blah.toString();
    qDebug() << qurlStr;

    /** This code block works with OS X 10.5+ only. DO NOT DELETE IT for now. */
    CFStringRef urlStr = CFStringCreateWithCharacters(0,
   				reinterpret_cast<const UniChar *>(
                qurlStr.unicode()), qurlStr.size());
    CFURLRef urlRef = CFURLCreateWithFileSystemPath(NULL, urlStr, kCFURLPOSIXPathStyle, false);
    err = ExtAudioFileOpenURL(urlRef, &m_audioFile);
    CFRelease(urlStr);
    CFRelease(urlRef);

    /** TODO: Use FSRef for compatibility with 10.4 Tiger.
        Note that ExtAudioFileOpen() is deprecated above Tiger, so we must maintain
        both code paths if someone finishes this part of the code.
    FSRef fsRef;
    CFURLGetFSRef(reinterpret_cast<CFURLRef>(url.get()), &fsRef);
    err = ExtAudioFileOpen(&fsRef, &m_audioFile);
    */

	if (err != noErr)
	{
		qDebug() << "SSCA: Error opening file.";
		return ERR;
	}

    // get the input file format
    CAStreamBasicDescription inputFormat;
    UInt32 size = sizeof(inputFormat);
    err = ExtAudioFileGetProperty(m_audioFile, kExtAudioFileProperty_FileDataFormat, &size, &inputFormat);
	if (err != noErr)
	{
		qDebug() << "SSCA: Error getting file format";
		return ERR;
	}
    // cache the format only after it has been filled in successfully
    m_inputFormat = inputFormat;

    //Debugging:
    //printf ("Source File format: "); inputFormat.Print();
    //printf ("Dest File format: "); outputFormat.Print();


	// create the output format
	CAStreamBasicDescription outputFormat;
    bzero(&outputFormat, sizeof(AudioStreamBasicDescription));
	outputFormat.mFormatID = kAudioFormatLinearPCM;
	outputFormat.mSampleRate = inputFormat.mSampleRate;
	outputFormat.mChannelsPerFrame = 2;
	outputFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger;

	/*
	switch(inputFormat.mBitsPerChannel) {
		case 16:
			outputFormat.mFormatFlags =  kAppleLosslessFormatFlag_16BitSourceData;
			break;
		case 20:
			outputFormat.mFormatFlags =  kAppleLosslessFormatFlag_20BitSourceData;
			break;
		case 24:
			outputFormat.mFormatFlags =  kAppleLosslessFormatFlag_24BitSourceData;
			break;
		case 32:
			outputFormat.mFormatFlags =  kAppleLosslessFormatFlag_32BitSourceData;
			break;
	}*/

    // get and set the client format - it should be lpcm
    CAStreamBasicDescription clientFormat = (inputFormat.mFormatID == kAudioFormatLinearPCM ? inputFormat : outputFormat);
	clientFormat.mBytesPerPacket = 4;
	clientFormat.mFramesPerPacket = 1;
	clientFormat.mBytesPerFrame = 4;
	clientFormat.mChannelsPerFrame = 2;
	clientFormat.mBitsPerChannel = 16;
	clientFormat.mReserved = 0;
	m_clientFormat = clientFormat;
    size = sizeof(clientFormat);

    err = ExtAudioFileSetProperty(m_audioFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat);
	if (err != noErr)
	{
		qDebug() << "SSCA: Error setting file property";
		return ERR;
	}

	//Set m_iChannels and m_samples;
	m_iChannels = clientFormat.NumberChannels();

	//get the total length in frames of the audio file - copypasta: http://discussions.apple.com/thread.jspa?threadID=2364583&tstart=47
	UInt32		dataSize;
	SInt64		totalFrameCount;
	dataSize	= sizeof(totalFrameCount); //XXX: This looks sketchy to me - Albert
	err			= ExtAudioFileGetProperty(m_audioFile, kExtAudioFileProperty_FileLengthFrames, &dataSize, &totalFrameCount);
	if (err != noErr)
	{
		qDebug() << "SSCA: Error getting number of frames";
		return ERR;
	}

	//
	// WORKAROUND for bug in ExtFileAudio
	//

	AudioConverterRef acRef;
	UInt32 acrsize = sizeof(AudioConverterRef);
	err = ExtAudioFileGetProperty(m_audioFile, kExtAudioFileProperty_AudioConverter, &acrsize, &acRef);
	//_ThrowExceptionIfErr(@"kExtAudioFileProperty_AudioConverter", err);

	AudioConverterPrimeInfo primeInfo;
	UInt32 piSize = sizeof(AudioConverterPrimeInfo);
	memset(&primeInfo, 0, piSize);
	err = AudioConverterGetProperty(acRef, kAudioConverterPrimeInfo, &piSize, &primeInfo);
	if (err != kAudioConverterErr_PropertyNotSupported) // only set if decompressing
	{
		//_ThrowExceptionIfErr(@"kAudioConverterPrimeInfo", err);
		m_headerFrames = primeInfo.leadingFrames;
	}

	m_samples = (totalFrameCount/*-m_headerFrames*/)*m_iChannels;
	m_iDuration = m_samples / (inputFormat.mSampleRate * m_iChannels);
	m_iSampleRate = inputFormat.mSampleRate;
	qDebug() << m_samples << totalFrameCount << m_iChannels;

	//Seek to position 0, which forces us to skip over all the header frames.
	//This makes sure we're ready to just let the Analyser rip and it'll
	//get the number of samples it expects (ie. no header frames).
	seek(0);

    return OK;
}
Example #21
// _______________________________________________________________________________________
//
// called to create the file -- or update its format/channel layout/properties based on an encoder
// setting change
void	CAAudioFile::FileFormatChanged(const FSRef *parentDir, CFStringRef filename, AudioFileTypeID filetype)
{
	LOG_FUNCTION("CAAudioFile::FileFormatChanged", "%p", this);
	XThrowIf(mMode != kPreparingToCreate && mMode != kPreparingToWrite, kExtAudioFileError_InvalidOperationOrder, "new file not prepared");

	UInt32 propertySize;
	OSStatus err;
	AudioStreamBasicDescription saveFileDataFormat = mFileDataFormat;

#if VERBOSE_CONVERTER
	mFileDataFormat.PrintFormat(stdout, "", "Specified file data format");
#endif

	// Find out the actual format the converter will produce. This is necessary in
	// case the bitrate has forced a lower sample rate, which needs to be set correctly
	// in the stream description passed to AudioFileCreate.
	if (mConverter != NULL) {
		propertySize = sizeof(AudioStreamBasicDescription);
		Float64 origSampleRate = mFileDataFormat.mSampleRate;
		XThrowIfError(AudioConverterGetProperty(mConverter, kAudioConverterCurrentOutputStreamDescription, &propertySize, &mFileDataFormat), "get audio converter's output stream description");
		// do the same for the channel layout being output by the converter
#if VERBOSE_CONVERTER
		mFileDataFormat.PrintFormat(stdout, "", "Converter output");
#endif
		if (fiszero(mFileDataFormat.mSampleRate))
			mFileDataFormat.mSampleRate = origSampleRate;
		err = AudioConverterGetPropertyInfo(mConverter, kAudioConverterOutputChannelLayout, &propertySize, NULL);
		if (err == noErr && propertySize > 0) {
			AudioChannelLayout *layout = static_cast<AudioChannelLayout *>(malloc(propertySize));
			err = AudioConverterGetProperty(mConverter, kAudioConverterOutputChannelLayout, &propertySize, layout);
			if (err) {
				free(layout);
				XThrow(err, "couldn't get audio converter's output channel layout");
			}
			mFileChannelLayout = layout;
#if VERBOSE_CHANNELMAP
			printf("got new file's channel layout from converter: %s\n", CAChannelLayouts::ConstantToString(mFileChannelLayout.Tag()));
#endif
			free(layout);
		}
	}

	// create the output file
	if (mMode == kPreparingToCreate) {
		CAStreamBasicDescription newFileDataFormat = mFileDataFormat;
		if (fiszero(newFileDataFormat.mSampleRate))
			newFileDataFormat.mSampleRate = 44100;	// just make something up for now
#if VERBOSE_CONVERTER
		newFileDataFormat.PrintFormat(stdout, "", "Applied to new file");
#endif
		XThrowIfError(AudioFileCreate(parentDir, filename, filetype, &newFileDataFormat, 0, &mFSRef, &mAudioFile), "create audio file");
		mMode = kPreparingToWrite;
		mOwnOpenFile = true;
	} else if (saveFileDataFormat != mFileDataFormat || fnotequal(saveFileDataFormat.mSampleRate, mFileDataFormat.mSampleRate)) {
		// second check must be explicit since operator== on ASBD treats SR of zero as "don't care"
		if (fiszero(mFileDataFormat.mSampleRate))
			mFileDataFormat.mSampleRate = mClientDataFormat.mSampleRate;
#if VERBOSE_CONVERTER
		mFileDataFormat.PrintFormat(stdout, "", "Applied to new file");
#endif
		XThrowIf(fiszero(mFileDataFormat.mSampleRate), kExtAudioFileError_InvalidDataFormat, "file's sample rate is 0");
		XThrowIfError(AudioFileSetProperty(mAudioFile, kAudioFilePropertyDataFormat, sizeof(AudioStreamBasicDescription), &mFileDataFormat), "couldn't update file's data format");
	}

	UInt32 deferSizeUpdates = 1;
	err = AudioFileSetProperty(mAudioFile, kAudioFilePropertyDeferSizeUpdates, sizeof(UInt32), &deferSizeUpdates);

	if (mConverter != NULL) {
		// encoder
		// get the magic cookie, if any, from the converter
		delete[] mMagicCookie;	mMagicCookie = NULL;
		mMagicCookieSize = 0;

		err = AudioConverterGetPropertyInfo(mConverter, kAudioConverterCompressionMagicCookie, &propertySize, NULL);

		// we can get a noErr result and also a propertySize == 0
		// -- if the file format does support magic cookies, but this file doesn't have one.
		if (err == noErr && propertySize > 0) {
			mMagicCookie = new Byte[propertySize];
			XThrowIfError(AudioConverterGetProperty(mConverter, kAudioConverterCompressionMagicCookie, &propertySize, mMagicCookie), "get audio converter's magic cookie");
			mMagicCookieSize = propertySize;	// the converter lies and tells us the wrong size
			// now set the magic cookie on the output file
			UInt32 willEatTheCookie = false;
			// the converter wants to give us one; will the file take it?
			err = AudioFileGetPropertyInfo(mAudioFile, kAudioFilePropertyMagicCookieData,
					NULL, &willEatTheCookie);
			if (err == noErr && willEatTheCookie) {
#if VERBOSE_CONVERTER
				printf("Setting cookie on encoded file\n");
#endif
				XThrowIfError(AudioFileSetProperty(mAudioFile, kAudioFilePropertyMagicCookieData, mMagicCookieSize, mMagicCookie), "set audio file's magic cookie");
			}
		}

		// get maximum packet size
		propertySize = sizeof(UInt32);
		XThrowIfError(AudioConverterGetProperty(mConverter, kAudioConverterPropertyMaximumOutputPacketSize, &propertySize, &mFileMaxPacketSize), "get audio converter's maximum output packet size");

		AllocateBuffers(true /* okToFail */);
	} else {
		InitFileMaxPacketSize();
	}

	if (mFileChannelLayout.IsValid() && mFileChannelLayout.NumberChannels() > 2) {
		// don't bother tagging mono/stereo files
		UInt32 isWritable;
		err = AudioFileGetPropertyInfo(mAudioFile, kAudioFilePropertyChannelLayout, NULL, &isWritable);
		if (!err && isWritable) {
#if VERBOSE_CHANNELMAP
			printf("writing file's channel layout: %s\n", CAChannelLayouts::ConstantToString(mFileChannelLayout.Tag()));
#endif
			err = AudioFileSetProperty(mAudioFile, kAudioFilePropertyChannelLayout,
				mFileChannelLayout.Size(), &mFileChannelLayout.Layout());
			if (err)
				CAXException::Warning("could not set the file's channel layout", err);
		} else {
#if VERBOSE_CHANNELMAP
			printf("file won't accept a channel layout (write)\n");
#endif
		}
	}

	UpdateClientMaxPacketSize();	// also sets mFrame0Offset
	mPacketMark = 0;
	mFrameMark = 0;
}
Example #22
// _______________________________________________________________________________________
//
void	CAAudioFile::SetClientFormat(const CAStreamBasicDescription &dataFormat, const CAAudioChannelLayout *layout)
{
	LOG_FUNCTION("CAAudioFile::SetClientFormat", "%p", this);
	XThrowIf(!dataFormat.IsPCM(), kExtAudioFileError_NonPCMClientFormat, "non-PCM client format on audio file");

	bool dataFormatChanging = (mClientDataFormat.mFormatID == 0 || mClientDataFormat != dataFormat);

	if (dataFormatChanging) {
		CloseConverter();
		if (mWriteBufferList) {
			delete mWriteBufferList;
			mWriteBufferList = NULL;
		}
		mClientDataFormat = dataFormat;
	}

	if (layout && layout->IsValid()) {
		XThrowIf(layout->NumberChannels() != mClientDataFormat.NumberChannels(), kExtAudioFileError_InvalidChannelMap, "inappropriate channel map");
		mClientChannelLayout = *layout;
	}

	bool differentLayouts;
	if (mClientChannelLayout.IsValid()) {
		if (mFileChannelLayout.IsValid()) {
			differentLayouts = mClientChannelLayout.Tag() != mFileChannelLayout.Tag();
#if VERBOSE_CHANNELMAP
			printf("two valid layouts, %s\n", differentLayouts ? "different" : "same");
#endif
		} else {
			differentLayouts = false;
#if VERBOSE_CHANNELMAP
			printf("valid client layout, unknown file layout\n");
#endif
		}
	} else {
		differentLayouts = false;
#if VERBOSE_CHANNELMAP
		if (mFileChannelLayout.IsValid())
			printf("valid file layout, unknown client layout\n");
		else
			printf("two invalid layouts\n");
#endif
	}

	if (mClientDataFormat != mFileDataFormat || differentLayouts) {
		// We need an AudioConverter.
		if (mMode == kReading) {
			// file -> client (decode)
//mFileDataFormat.PrintFormat(  stdout, "", "File:   ");
//mClientDataFormat.PrintFormat(stdout, "", "Client: ");

			if (mConverter == NULL)
				XThrowIfError(AudioConverterNew(&mFileDataFormat, &mClientDataFormat, &mConverter),
				"create audio converter");

#if VERBOSE_CONVERTER
			printf("CAAudioFile %p -- created converter\n", this);
			CAShow(mConverter);
#endif
			// set the magic cookie, if any (for decode)
			if (mMagicCookie)
				SetConverterProperty(kAudioConverterDecompressionMagicCookie, mMagicCookieSize, mMagicCookie, mFileDataFormat.IsPCM());
					// we get cookies from some AIFF's but the converter barfs on them,
					// so we set canFail to true for PCM

			SetConverterChannelLayout(false, mFileChannelLayout);
			SetConverterChannelLayout(true, mClientChannelLayout);

			// propagate leading/trailing frame counts
			if (mFileDataFormat.mBitsPerChannel == 0) {
				UInt32 propertySize;
				OSStatus err;
				AudioFilePacketTableInfo pti;
				propertySize = sizeof(pti);
				err = AudioFileGetProperty(mAudioFile, kAudioFilePropertyPacketTableInfo, &propertySize, &pti);
				if (err == noErr && (pti.mPrimingFrames > 0 || pti.mRemainderFrames > 0)) {
					AudioConverterPrimeInfo primeInfo;
					primeInfo.leadingFrames = pti.mPrimingFrames;
					primeInfo.trailingFrames = pti.mRemainderFrames;
					/* ignore any error. better to play it at all than not. */
					/*err = */AudioConverterSetProperty(mConverter, kAudioConverterPrimeInfo, sizeof(primeInfo), &primeInfo);
					//XThrowIfError(err, "couldn't set prime info on converter");
				}
			}
		} else if (mMode == kPreparingToCreate || mMode == kPreparingToWrite) {
			// client -> file (encode)
			if (mConverter == NULL)
				XThrowIfError(AudioConverterNew(&mClientDataFormat, &mFileDataFormat, &mConverter), "create audio converter");
			mWriteBufferList = CABufferList::New("", mClientDataFormat);
			SetConverterChannelLayout(false, mClientChannelLayout);
			SetConverterChannelLayout(true, mFileChannelLayout);
			if (mMode == kPreparingToWrite)
				FileFormatChanged();
		} else
			XThrowIfError(kExtAudioFileError_InvalidOperationOrder, "audio file format not yet known");
	}
	UpdateClientMaxPacketSize();
}
Example #23
int main (int argc, const char * argv[]) 
{
	char* filePath = NULL;
	bool overwrite = false;
	ComponentDescription	compDesc = {0, 0, 0, 0, 0};
	AudioFileID inputFileID = 0;
	AudioFileID outputFileID = 0;
	CAStreamBasicDescription desc;
	AudioUnit theUnit = 0;
	
	setbuf (stdout, NULL);
	
	for (int i = 1; i < argc; ++i)
	{
		if (strcmp (argv[i], "-u") == 0) {
            if ( (i + 3) < argc ) {                
                compDesc.componentType = str2OSType (argv[i + 1]);
                compDesc.componentSubType = str2OSType (argv[i + 2]);
                compDesc.componentManufacturer = str2OSType (argv[i + 3]);
				Component comp = FindNextComponent (NULL, &compDesc);
				if (comp == NULL)
					break;
				OpenAComponent (comp, &theUnit);
				i += 3;
			} else {
				printf ("Which Component:\n%s", usageStr);
				return -1;
			}
		}
		else if (strcmp (argv[i], "-f") == 0) {
			filePath = const_cast<char*>(argv[++i]);
			printf ("Input File:%s\n", filePath);
		}
		else if (strcmp (argv[i], "-o") == 0) {
			overwrite = true;
		}
		else {
			printf ("%s\n", usageStr);
			return -1;
		}
	}
	
	if (compDesc.componentType == 0) {
		printf ("Must specify AU:\n%s\n", usageStr);
		return -1;
	}
	
	if (theUnit == 0) {
		printf ("Can't find specified unit\n");
		return -1;
	}
	
	if (filePath == NULL) {
		printf ("Must specify file to process:\n%s\n", usageStr);
		return -1;
	}
	
	OSStatus result = 0;
	if (result = InputFile (filePath, inputFileID)) {
		printf ("Result = %ld, parsing input file, exit...\n", result);
		return result;
	}
			
		
	UInt32 fileType;
	UInt32 size = sizeof (fileType);
	result = AudioFileGetProperty (inputFileID, kAudioFilePropertyFileFormat, &size, &fileType);
	if (result) {
		printf ("Error getting File Type of input file:%ld, exit...\n", result);
		return result;
	}
	size = sizeof (desc);
	result = AudioFileGetProperty (inputFileID, kAudioFilePropertyDataFormat, &size, &desc);
	if (result) {
		printf ("Error getting File Format of input file:%ld, exit...\n", result);
		return result;
	}
	if (desc.IsPCM() == false) {
		printf ("Only processing linear PCM file types and data:\n");
		desc.Print();
		return -1;
	}
	result = OutputFile (filePath, fileType, compDesc.componentSubType, overwrite, desc, outputFileID);
	if (result) {
		printf ("Error creating output file:%ld, exit...\n", result);
		return result;
	}	
	
// at this point we're ready to process	
	return Process (theUnit, compDesc, inputFileID, desc, outputFileID);
}
Example #24
OSStatus		CAAUProcessor::DoInitialisation (const CAStreamBasicDescription 	&inInputFormat,
												const CAStreamBasicDescription 		&inOutputFormat,
												UInt64								inNumInputSamples,
												UInt32 								inMaxFrames)
{
	OSStatus result;
	
	if (inNumInputSamples == 0 && IsOfflineAU())
		return kAudioUnitErr_InvalidOfflineRender;
		
	mNumInputSamples = inNumInputSamples;
	
		// first check that we can do this number of channels
	if (mUnit.CanDo (inInputFormat.NumberChannels(), inOutputFormat.NumberChannels()) == false)
		ca_require_noerr (result = kAudioUnitErr_FailedInitialization, home);
	
	// just uninitialise the AU as a matter of course
	ca_require_noerr (result = mUnit.Uninitialize(), home);

	ca_require_noerr (result = mUnit.SetFormat (kAudioUnitScope_Input, 0, inInputFormat), home); 
	ca_require_noerr (result = mUnit.SetFormat (kAudioUnitScope_Output, 0, inOutputFormat), home); 
	ca_require_noerr (result = SetMaxFramesPerRender (inMaxFrames), home);
	
#if !TARGET_OS_IPHONE
		// if we're any AU but an offline AU, we should tell it that we're processing offline
	if (!IsOfflineAU()) {
		UInt32 isOffline = (IsOfflineContext() ? 1 : 0);
			// we don't care whether this succeeds or fails, as many AUs ignore this,
			// but for the ones that do care it's important that they are told their render context
		mUnit.SetProperty (kAudioUnitProperty_OfflineRender, kAudioUnitScope_Global, 0, &isOffline, sizeof(isOffline));
	} else {
			// tell the offline unit how many input samples we wish to process...
		mUnit.SetProperty (kAudioUnitOfflineProperty_InputSize,
												kAudioUnitScope_Global, 0,
												&mNumInputSamples, sizeof(mNumInputSamples));
	}
#endif
	
	ca_require_noerr (result = mUnit.Initialize(), home);

	ca_require_noerr (result = SetInputCallback (mUnit, mUserCallback), home);
	
	// finally reset our time stamp
	// the time stamp we use with the AU Render - only sample count is valid
	memset (&mRenderTimeStamp, 0, sizeof(mRenderTimeStamp));
	mRenderTimeStamp.mFlags = kAudioTimeStampSampleTimeValid;

	// now, if we're NOT an offline AU, preflighting is not required
	// if we are an offline AU, we should preflight.. an offline AU will tell us when its preflighting is done
	mPreflightDone = false;

	if (mPreflightABL) {
		delete mPreflightABL;
		mPreflightABL = NULL;
	}
	
	mPreflightABL = new AUOutputBL (inOutputFormat);

	mLastPercentReported = 0;
	
home:
	return result;
}
Example #25
int main(int argc, const char * argv[])
{
#if TARGET_OS_MAC
	{
		thread_extended_policy_data_t		theFixedPolicy;
		theFixedPolicy.timeshare = false;	// set to true for a non-fixed thread
		thread_policy_set(pthread_mach_thread_np(pthread_self()), 
													THREAD_EXTENDED_POLICY, 
													(thread_policy_t)&theFixedPolicy, 
													THREAD_EXTENDED_POLICY_COUNT);

		// We keep a reference to the spawning thread's priority around (initialized in the constructor), 
		// and set the importance of the child thread relative to the spawning thread's priority.
		thread_precedence_policy_data_t		thePrecedencePolicy;
		
		thePrecedencePolicy.importance = 63 - 36;
		thread_policy_set(pthread_mach_thread_np(pthread_self()), 
													THREAD_PRECEDENCE_POLICY, 
													(thread_policy_t)&thePrecedencePolicy, 
													THREAD_PRECEDENCE_POLICY_COUNT);
	}
#endif


// These are the variables that are set up from the input parsing
	char* srcFilePath = NULL;
	char* auPresetFile = NULL;
	OSType manu, subType, type = 0;
	UInt32 numFrames = 4096;
	
	for (int i = 1; i < argc; ++i)
	{
		if (strcmp (argv[i], "-au") == 0) {
            if ( (i + 3) < argc ) {                
                StrToOSType (argv[i + 1], type);
                StrToOSType (argv[i + 2], subType);
                StrToOSType (argv[i + 3], manu);
				i += 3;
			} else {
				printf ("Which Audio Unit:\n%s", usageStr);
				exit(1);
			}
		}
		else if (strcmp (argv[i], "-i") == 0) {
			srcFilePath = const_cast<char*>(argv[++i]);
		}
		else if (strcmp (argv[i], "-p") == 0) {
			auPresetFile = const_cast<char*>(argv[++i]);
		}
		else if (strcmp (argv[i], "-f") == 0) {
			sscanf(argv[++i], "%ld", &numFrames);
		}
		else {
			printf ("%s\n", usageStr);
			exit(1);
		}
	}
	
	if (!type || !srcFilePath) {
		printf ("%s\n", usageStr);
		exit(1);
	}
	
	CAComponentDescription desc(type, subType, manu);
	
	CFPropertyListRef presetDict = ReadPresetFromPresetFile(auPresetFile);
	
#pragma mark -
#pragma mark __ The driving code
#pragma mark -

	try 
	{
		CAComponent comp(desc);
			
			 // CAAUProcessor's constructor throws... so make sure the component is valid
		if (comp.IsValid() == false) {
			printf ("Can't Find Component\n");
			desc.Print();
			exit(1);
		}
			
		CAAUProcessor processor(comp);
										processor.AU().Comp().Print();
		
		CAAudioFile srcFile;
		
		srcFile.Open(srcFilePath);


		UInt64 numInputSamples = srcFile.GetNumberFrames();

		Float64 inputSecs = (numInputSamples / srcFile.GetFileDataFormat().mSampleRate);
		
		CAStreamBasicDescription procFormat (srcFile.GetFileDataFormat());
		procFormat.SetCanonical (srcFile.GetFileDataFormat().NumberChannels(), false);

										printf ("Processing file: %s, %.1f secs [proc: %ld frames]\n", srcFilePath, inputSecs, numFrames);
										#if VERBOSE
											printf("\t");
											procFormat.Print();
										#endif
		
		srcFile.SetClientFormat (procFormat);
		
		AUOutputBL outputList(procFormat);
	
		// read the entire file into memory
		ReadBuffer* readBuf = new ReadBuffer;
		readBuf->readData = new AUOutputBL(procFormat);
		readBuf->totalInputFrames = numInputSamples;
		readBuf->readData->Allocate (numInputSamples);
		readBuf->readData->Prepare();
		UInt32 readSamps = (UInt32)numInputSamples;
		srcFile.Read (readSamps, readBuf->readData->ABL());
			
		AURenderCallbackStruct inputCallback;
		inputCallback.inputProc = MemoryInputCallback;
		inputCallback.inputProcRefCon = readBuf;
				
		OSStatus result;
		require_noerr (result = processor.EstablishInputCallback (inputCallback), home);
		require_noerr (result = processor.SetMaxFramesPerRender (numFrames), home); 
		require_noerr (result = processor.Initialize (procFormat, numInputSamples), home);
		if (presetDict) {
			require_noerr (result = processor.SetAUPreset (presetDict), home);
			CFRelease (presetDict);
		}
			// this does ALL of the preflighting.. could be specialised for an OfflineAU type
			// to do this piecemeal and drive a progress bar by using the OfflineAUPreflight method
		readBuf->lastInputFrames = 0;
		require_noerr (result = processor.Preflight (), home);
	
	float mean;
	
	// now do the processing....
	{
		const int kThrasherSize = 4000000;
		char* thrasher = new char[kThrasherSize];
		
		bool isDone = false;
		
		UInt32 numMeasures = 0;
		Float64 totalMSqrd = 0;
		Float64 totalM = 0;
		
		int i = 0;
		int discardResults = 3;
						
			// this is the render loop
		while (!isDone) 
		{
			bool isSilence, postProcess;

			outputList.Prepare(); // have to do this every time...
			readBuf->lastInputFrames = 0;
			sLastReadTime = 0;
			memset (thrasher, numMeasures, kThrasherSize);

												UInt64 now = CAHostTimeBase::GetTheCurrentTime(); 
			require_noerr (result = processor.Render (outputList.ABL(), numFrames, isSilence, &isDone, &postProcess), home);
												UInt64 renderTime = (CAHostTimeBase::GetTheCurrentTime() - now);
			
			if (i++ < discardResults) continue;
			if (!readBuf->lastInputFrames) break;

			Float64 renderTimeSecs = CAHostTimeBase::ConvertToNanos (renderTime - sLastReadTime) / 1.0e9;
			
			Float64 cpuTime = (renderTimeSecs / (readBuf->lastInputFrames / procFormat.mSampleRate)) * 100.;
			numMeasures++;
			
			totalMSqrd += (cpuTime * cpuTime);
			totalM += cpuTime;

			if (cpuTime > sMaxTime)
				sMaxTime = cpuTime;
			if (cpuTime < sMinTime)
				sMinTime = cpuTime;
									
#if VERBOSE
//			printf ("current measure: %.2f\n", cpuTime);
			if (numMeasures % 5 == 0) {
				Float64 mean = totalM / numMeasures;				
					//	stdDev = (sum of Xsquared -((sum of X)*(sum of X)/N)) / (N-1))
				Float64 stdDev = sqrt ((totalMSqrd - ((totalM * totalM) / numMeasures)) / (numMeasures-1.0));
				printf ("ave: %.2f, min: %.2f, max: %.2f, stdev: %.2f, numMeasures: %ld, current: %f\n", 
					mean, sMinTime, sMaxTime, stdDev, numMeasures, cpuTime);
			}
#endif
		}
		delete [] thrasher;
		
		mean = totalM / numMeasures;
			//	stdDev = (sum of Xsquared -((sum of X)*(sum of X)/N)) / (N-1))
		Float64 stdDev = sqrt ((totalMSqrd - ((totalM * totalM) / numMeasures)) / (numMeasures-1.0));

		printf ("ave: %.2f, min: %.2f, max: %.2f, sd: %.2f, sd / mean: %.2f%%\n", 
			mean, sMinTime, sMaxTime, stdDev, (stdDev / mean * 100.));
	}

		// we don't care about post-processing

home:
		if (result) {
			printf ("Exit with bad result:%ld\n", result);
			exit(result);
		}
		
		if (readBuf) {
			delete readBuf->readData;
			delete readBuf;
		}
		

		CFStringRef str = comp.GetCompName();
		UInt32 compNameLen = CFStringGetLength (str);
		
		CFStringRef presetName = NULL;
		if (auPresetFile) {
			CFPropertyListRef dict;
			if (processor.AU().GetAUPreset (dict) == noErr) {
				presetName = (CFStringRef)CFDictionaryGetValue((CFDictionaryRef)dict, CFSTR("name"));
				CFRelease (dict);
			}
		}

		UInt32 presetLen = presetName ? CFStringGetLength(presetName) : 0;
		
		UInt32 groupID = comp.Desc().componentSubType;
		
		char* cstr = (char*)malloc (compNameLen + presetLen + 2 + 1);
		CFStringGetCString (str, cstr, (CFStringGetLength (str) + 1), kCFStringEncodingASCII);
		if (presetName) {
			cstr[compNameLen] = ':';
			cstr[compNameLen+1] = ':';
			CFStringGetCString (presetName, cstr + compNameLen + 2, (CFStringGetLength (presetName) + 1), kCFStringEncodingASCII);
			int len = strlen(cstr);
			for (int i = 0; i < len; ++i)
				groupID += cstr[i];
		}
		PerfResult("AU Profile", EndianU32_NtoB(groupID), cstr, mean, "%realtime");
		free (cstr);

	}
	catch (CAXException &e) {
		char buf[256];
		printf("Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
		exit(1);
	}
	catch (...) {
		printf("An unknown error occurred\n");
		exit(1);
	}
			
	return 0;
}
Example #26
void	CAAudioFileConverter::GenerateOutputFileName(const char *inputFilePath, 
						const CAStreamBasicDescription &inputFormat,
						const CAStreamBasicDescription &outputFormat, OSType outputFileType, 
						char *outName)
{
	struct stat sb;
	char inputDir[256];
	char inputBasename[256];
	
	strcpy(inputDir, dirname(inputFilePath));
	const char *infname = basename(inputFilePath);
	const char *inext = strrchr(infname, '.');
	if (inext == NULL) strcpy(inputBasename, infname);
	else {
		int n;
		memcpy(inputBasename, infname, n = inext - infname);
		inputBasename[n] = '\0';
	}
	
	CFArrayRef exts;
	UInt32 propSize = sizeof(exts);
	XThrowIfError(AudioFileGetGlobalInfo(kAudioFileGlobalInfo_ExtensionsForType,
		sizeof(OSType), &outputFileType, &propSize, &exts), "generate output file name");
	char outputExt[32];
	CFStringRef cfext = (CFStringRef)CFArrayGetValueAtIndex(exts, 0);
	CFStringGetCString(cfext, outputExt, sizeof(outputExt), kCFStringEncodingUTF8);
	CFRelease(exts);
	
	// 1. olddir + oldname + newext
	sprintf(outName, "%s/%s.%s", inputDir, inputBasename, outputExt);
#if TARGET_OS_MAC	
	if (lstat(outName, &sb)) return;
#else
	if (stat(outName, &sb)) return;
#endif

	if (outputFormat.IsPCM()) {
		// If sample rate changed:
		//	2. olddir + oldname + "-SR" + newext
		if (inputFormat.mSampleRate != outputFormat.mSampleRate && outputFormat.mSampleRate != 0.) {
			sprintf(outName, "%s/%s-%.0fk.%s", inputDir, inputBasename, outputFormat.mSampleRate/1000., outputExt);
#if TARGET_OS_MAC	
			if (lstat(outName, &sb)) return;
#else
			if (stat(outName, &sb)) return;
#endif
		}
		// If bit depth changed:
		//	3. olddir + oldname + "-bit" + newext
		if (inputFormat.mBitsPerChannel != outputFormat.mBitsPerChannel) {
			sprintf(outName, "%s/%s-%ldbit.%s", inputDir, inputBasename, outputFormat.mBitsPerChannel, outputExt);
#if TARGET_OS_MAC	
			if (lstat(outName, &sb)) return;
#else
			if (stat(outName, &sb)) return;
#endif
		}
	}
	
	// maybe more with channels/layouts? $$$
	
	// now just append digits
	for (int i = 1; ; ++i) {
		sprintf(outName, "%s/%s-%d.%s", inputDir, inputBasename, i, outputExt);
#if TARGET_OS_MAC	
		if (lstat(outName, &sb)) return;
#else
		if (stat(outName, &sb)) return;
#endif
	}
}
Example #27
void	CAAudioFileConverter::ConvertFile(const ConversionParameters &_params)
{
	FSRef destFSRef;
	UInt32 propertySize;
	CAStreamBasicDescription destFormat;
	CAAudioChannelLayout origSrcFileLayout, srcFileLayout, destFileLayout;
	bool openedSourceFile = false, createdOutputFile = false;
	
	mParams = _params;
	mReadBuffer = NULL;
	mReadPtrs = NULL;
	CABufferList *writeBuffer = NULL;
	CABufferList *writePtrs = NULL;
	
	PrepareConversion();

	try {
		if (TaggedDecodingFromCAF())
			ReadCAFInfo();
		OpenInputFile();
		openedSourceFile = true;
		
		// get input file's format
		const CAStreamBasicDescription &srcFormat = mSrcFile.GetFileDataFormat();
		if (mParams.flags & kOpt_Verbose) {
			printf("Input file: %s, %qd frames\n", mParams.input.filePath ? basename(mParams.input.filePath) : "?", 
				mSrcFile.GetNumberFrames());
		}
		mSrcFormat = srcFormat;
		
		// prepare output file's format
		destFormat = mParams.output.dataFormat;

		bool encoding = !destFormat.IsPCM();
		bool decoding = !srcFormat.IsPCM();

		if (!encoding && destFormat.mSampleRate == 0.)
			// on encode, it's OK to have a 0 sample rate; ExtAudioFile will get the SR from the converter and set it on the file.
			// on decode or PCM->PCM, a sample rate of 0 is interpreted as using the source sample rate
			destFormat.mSampleRate = srcFormat.mSampleRate;
		
		// source channel layout
		srcFileLayout = mSrcFile.GetFileChannelLayout();
		origSrcFileLayout = srcFileLayout;
		if (mParams.input.channelLayoutTag != 0) {
			XThrowIf(AudioChannelLayoutTag_GetNumberOfChannels(mParams.input.channelLayoutTag)
				!= srcFormat.mChannelsPerFrame, -1, "input channel layout has wrong number of channels for file");
			srcFileLayout = CAAudioChannelLayout(mParams.input.channelLayoutTag);
			mSrcFile.SetFileChannelLayout(srcFileLayout);
		}
		
		// destination channel layout
		int outChannels = mParams.output.channels;
		if (mParams.output.channelLayoutTag != 0) {
			// use the one specified by caller, if any
			destFileLayout = CAAudioChannelLayout(mParams.output.channelLayoutTag);
		} else if (srcFileLayout.IsValid()) {
			// otherwise, assume the same as the source, if any
			destFileLayout = srcFileLayout;
		}
		if (destFileLayout.IsValid()) {
			// the output channel layout specifies the number of output channels
			if (outChannels != -1)
				XThrowIf((unsigned)outChannels != destFileLayout.NumberChannels(), -1,
					"output channel layout has wrong number of channels");
			else
				outChannels = destFileLayout.NumberChannels();
		}

		if (!(mParams.flags & kOpt_NoSanitizeOutputFormat)) {
			// adjust the output format's channels; output.channels overrides the channels
			if (outChannels == -1)
				outChannels = srcFormat.mChannelsPerFrame;
			if (outChannels > 0) {
				destFormat.mChannelsPerFrame = outChannels;
				destFormat.mBytesPerPacket *= outChannels;
				destFormat.mBytesPerFrame *= outChannels;
			}
		
			// use AudioFormat API to clean up the output format
			propertySize = sizeof(AudioStreamBasicDescription);
			XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &propertySize, &destFormat),
					"get destination format info");
		}
		OpenOutputFile(srcFormat, destFormat, destFSRef, destFileLayout);
		createdOutputFile = true;
		mDestFormat = destFormat;
		
		// set up client formats
		CAStreamBasicDescription srcClientFormat, destClientFormat;
		{
			CAAudioChannelLayout srcClientLayout, destClientLayout;
			
			if (encoding) {
				if (decoding) {
					// transcoding
//					XThrowIf(encoding && decoding, -1, "transcoding not currently supported");
					
					if (srcFormat.mChannelsPerFrame > 2 || destFormat.mChannelsPerFrame > 2)
						CAXException::Warning("Transcoding multichannel audio may not handle channel layouts correctly", 0);
					srcClientFormat.SetCanonical(std::min(srcFormat.mChannelsPerFrame, destFormat.mChannelsPerFrame), true);
					srcClientFormat.mSampleRate = std::max(srcFormat.mSampleRate, destFormat.mSampleRate);
					mSrcFile.SetClientFormat(srcClientFormat, NULL);
					
					destClientFormat = srcClientFormat;
				} else {
					// encoding
					srcClientFormat = srcFormat;
					destClientFormat = srcFormat;
				}
				// by here, destClientFormat will have a valid sample rate
				destClientLayout = srcFileLayout.IsValid() ? srcFileLayout : destFileLayout;

				mDestFile.SetClientFormat(destClientFormat, &destClientLayout);
			} else {
				// decoding or PCM->PCM
				if (destFormat.mSampleRate == 0.)
					destFormat.mSampleRate = srcFormat.mSampleRate;
		
				destClientFormat = destFormat;
				srcClientFormat = destFormat;
				srcClientLayout = destFileLayout;
				
				mSrcFile.SetClientFormat(srcClientFormat, &srcClientLayout);
			}
		}
		
		XThrowIf(srcClientFormat.mBytesPerPacket == 0, -1, "source client format not PCM"); 
		XThrowIf(destClientFormat.mBytesPerPacket == 0, -1, "dest client format not PCM"); 		
		if (encoding) {
			// set the bitrate
			if (mParams.output.bitRate != -1) {
				if (mParams.flags & kOpt_Verbose)
					printf("bitrate = %ld\n", mParams.output.bitRate);
				mDestFile.SetConverterProperty(kAudioConverterEncodeBitRate, sizeof(UInt32), &mParams.output.bitRate);
			}

			// set the codec quality
			if (mParams.output.codecQuality != -1) {
				if (mParams.flags & kOpt_Verbose)
					printf("codec quality = %ld\n", mParams.output.codecQuality);
				mDestFile.SetConverterProperty(kAudioConverterCodecQuality, sizeof(UInt32), &mParams.output.codecQuality);
			}

			// set the bitrate strategy -- called bitrate format in the codecs since it had already shipped
			if (mParams.output.strategy != -1) {
				if (mParams.flags & kOpt_Verbose)
					printf("strategy = %ld\n", mParams.output.strategy);
				mDestFile.SetConverterProperty(kAudioCodecBitRateFormat, sizeof(UInt32), &mParams.output.strategy);
			}
		}
		// set the SRC quality
		if (mParams.output.srcQuality != -1) {
			if (srcFormat.mSampleRate != 0. && destFormat.mSampleRate != 0. && srcFormat.mSampleRate != destFormat.mSampleRate) {
				if (mParams.flags & kOpt_Verbose)
					printf("SRC quality = %ld\n", mParams.output.srcQuality);
				if (encoding)
					mDestFile.SetConverterProperty(kAudioConverterSampleRateConverterQuality, sizeof(UInt32), &mParams.output.srcQuality);
				else
					mSrcFile.SetConverterProperty(kAudioConverterSampleRateConverterQuality, sizeof(UInt32), &mParams.output.srcQuality);
			}
		}
		if (decoding) {
			if (mParams.output.primeMethod != -1)
				mSrcFile.SetConverterProperty(kAudioConverterPrimeMethod, sizeof(UInt32), &mParams.output.primeMethod);
		}

		PrintFormats(&origSrcFileLayout);

		// prepare I/O buffers
		UInt32 bytesToRead = 0x10000;
		UInt32 framesToRead = bytesToRead;	// OK, ReadPackets will limit as appropriate
		ComputeReadSize(srcFormat, destFormat, bytesToRead, framesToRead);

//		const SInt64 totalFrames = mSrcFile.GetNumberFrames();
//#warning "GetNumberFrames() can be prohibitively slow for some formats"
		
		mReadBuffer = CABufferList::New("readbuf", srcClientFormat);
		mReadBuffer->AllocateBuffers(bytesToRead);
		mReadPtrs = CABufferList::New("readptrs", srcClientFormat);
		
		BeginConversion();
		
		while (true) {
			//XThrowIf(Progress(mSrcFile.Tell(), totalFrames), userCanceledErr, "user stopped");
				// this was commented out for awhile -- performance? make it optional?
			UInt32 nFrames = framesToRead;
			mReadPtrs->SetFrom(mReadBuffer);
			AudioBufferList *readbuf = &mReadPtrs->GetModifiableBufferList();
			
			mSrcFile.Read(nFrames, readbuf);
			//printf("read %ld of %ld frames\n", nFrames, framesToRead);
			if (nFrames == 0)
				break;

			mDestFile.Write(nFrames, readbuf);
			if (ShouldTerminateConversion())
				break;
		}
		
		if (decoding) {
			// fix up the destination file's length if necessary and possible
			SInt64 nframes = mSrcFile.GetNumberFrames();
			if (nframes != 0) {
				// only shorten, don't try to lengthen
				nframes = SInt64(ceil(nframes * destFormat.mSampleRate / srcFormat.mSampleRate));
				if (nframes < mDestFile.GetNumberFrames()) {
					mDestFile.SetNumberFrames(nframes);
				}
			}
		}
		EndConversion();
	}
	catch (...) {
		delete mReadBuffer;
		delete mReadPtrs;
		delete writeBuffer;
		delete writePtrs;
		if (!createdOutputFile)
			PrintFormats(&origSrcFileLayout);
		try { mSrcFile.Close(); } catch (...) { }
		try { mDestFile.Close(); } catch (...) { }
		if (createdOutputFile)
			unlink(mOutName);
		throw;
	}
	delete mReadBuffer;
	delete mReadPtrs;
	delete writeBuffer;
	delete writePtrs;
	mSrcFile.Close();
	mDestFile.Close();
	if (TaggedEncodingToCAF())
		WriteCAFInfo();
	
	if (mParams.flags & kOpt_Verbose) {
		// must close to flush encoder; GetNumberFrames() not necessarily valid until afterwards but then
		// the file is closed
		CAAudioFile temp;
		FSRef destFSRef;
		if (FSPathMakeRef((UInt8 *)mOutName, &destFSRef, NULL) == noErr) {
			temp.Open(destFSRef);
			printf("Output file: %s, %qd frames\n", basename(mOutName), temp.GetNumberFrames());
		}
	}
}
Example #28
OSStatus DoConvertFile(CFURLRef sourceURL, CFURLRef destinationURL, OSType outputFormat, Float64 outputSampleRate) 
{
    ExtAudioFileRef sourceFile = 0;
    ExtAudioFileRef destinationFile = 0;
    Boolean         canResumeFromInterruption = true; // we can continue unless told otherwise
    OSStatus        error = noErr;
    
    // in this sample we should never be on the main thread here
    assert(![NSThread isMainThread]);
    
    // transition thread state to kStateRunning before continuing
    ThreadStateSetRunning();
    
    printf("DoConvertFile\n");
    
	try {
        CAStreamBasicDescription srcFormat, dstFormat;

        // open the source file
        XThrowIfError(ExtAudioFileOpenURL(sourceURL, &sourceFile), "ExtAudioFileOpenURL failed");
			
        // get the source data format
		UInt32 size = sizeof(srcFormat);
		XThrowIfError(ExtAudioFileGetProperty(sourceFile, kExtAudioFileProperty_FileDataFormat, &size, &srcFormat), "couldn't get source data format");
		
		printf("\nSource file format: "); srcFormat.Print();

        // setup the output file format
        dstFormat.mSampleRate = (outputSampleRate == 0 ? srcFormat.mSampleRate : outputSampleRate); // set sample rate
        if (outputFormat == kAudioFormatLinearPCM) {
            // if PCM was selected as the destination format, create a 16-bit int PCM file format description
            dstFormat.mFormatID = outputFormat;
            dstFormat.mChannelsPerFrame = srcFormat.NumberChannels();
            dstFormat.mBitsPerChannel = 16;
            dstFormat.mBytesPerPacket = dstFormat.mBytesPerFrame = 2 * dstFormat.mChannelsPerFrame;
            dstFormat.mFramesPerPacket = 1;
            dstFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger; // little-endian
        } else {
            // compressed format - need to set at least format, sample rate and channel fields for kAudioFormatProperty_FormatInfo
            dstFormat.mFormatID = outputFormat;
            dstFormat.mChannelsPerFrame =  (outputFormat == kAudioFormatiLBC ? 1 : srcFormat.NumberChannels()); // for iLBC num channels must be 1
            
            // use AudioFormat API to fill out the rest of the description
            size = sizeof(dstFormat);
            XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &size, &dstFormat), "couldn't create destination data format");
        }
        
        printf("\nDestination file format: "); dstFormat.Print();
        
        // create the destination file 
        XThrowIfError(ExtAudioFileCreateWithURL(destinationURL, kAudioFileCAFType, &dstFormat, NULL, kAudioFileFlags_EraseFile, &destinationFile), "ExtAudioFileCreateWithURL failed!");

        // set the client format - The format must be linear PCM (kAudioFormatLinearPCM)
        // You must set this in order to encode or decode a non-PCM file data format
        // You may set this on PCM files to specify the data format used in your calls to read/write
        CAStreamBasicDescription clientFormat;
        if (outputFormat == kAudioFormatLinearPCM) {
            clientFormat = dstFormat;
        } else {
            clientFormat.SetCanonical(srcFormat.NumberChannels(), true);
            clientFormat.mSampleRate = srcFormat.mSampleRate;
        }
        
        printf("\nClient data format: "); clientFormat.Print();
        printf("\n");
        
        size = sizeof(clientFormat);
        XThrowIfError(ExtAudioFileSetProperty(sourceFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat), "couldn't set source client format");
        
        size = sizeof(clientFormat);
        XThrowIfError(ExtAudioFileSetProperty(destinationFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat), "couldn't set destination client format");

        // can the audio converter (which in this case is owned by an ExtAudioFile object) resume conversion after an interruption?
        AudioConverterRef audioConverter;
                    
        size = sizeof(audioConverter);
        XThrowIfError(ExtAudioFileGetProperty(destinationFile, kExtAudioFileProperty_AudioConverter, &size, &audioConverter), "Couldn't get Audio Converter!");
        
        // this property may be queried at any time after construction of the audio converter (which in this case is owned by an ExtAudioFile object)
        // after setting the output format -- there's no clear reason to prefer construction time, interruption time, or potential resumption time but we prefer
        // construction time since it means less code to execute during or after interruption time
        UInt32 canResume = 0;
        size = sizeof(canResume);
        error = AudioConverterGetProperty(audioConverter, kAudioConverterPropertyCanResumeFromInterruption, &size, &canResume);
        if (noErr == error) {
            // we received a valid return value from the GetProperty call
            // if the property's value is 1, then the codec CAN resume work following an interruption
            // if the property's value is 0, then interruptions destroy the codec's state and we're done
            
            if (0 == canResume) canResumeFromInterruption = false;
            
            printf("Audio Converter %s continue after interruption!\n", (canResumeFromInterruption == 0 ? "CANNOT" : "CAN"));
        } else {
            // if the property is unimplemented (kAudioConverterErr_PropertyNotSupported, or paramErr returned in the case of PCM),
            // then the codec being used is not a hardware codec so we're not concerned about codec state
            // we are always going to be able to resume conversion after an interruption
            
            if (kAudioConverterErr_PropertyNotSupported == error) {
                printf("kAudioConverterPropertyCanResumeFromInterruption property not supported!\n");
            } else {
                printf("AudioConverterGetProperty kAudioConverterPropertyCanResumeFromInterruption result %ld\n", error);
            }
            
            error = noErr;
        }
        
        // set up buffers
        const UInt32 bufferByteSize = 32768; // constant, so the stack buffer below has a compile-time size
        char srcBuffer[bufferByteSize];
        
        // keep track of the source file offset so we know where to reset the source for
        // reading if interrupted and input was not consumed by the audio converter
        SInt64 sourceFrameOffset = 0;
        
        //***** do the read and write - the conversion is done on and by the write call *****//
        printf("Converting...\n");
        while (1) {
        
            AudioBufferList fillBufList;
            fillBufList.mNumberBuffers = 1;
            fillBufList.mBuffers[0].mNumberChannels = clientFormat.NumberChannels();
            fillBufList.mBuffers[0].mDataByteSize = bufferByteSize;
            fillBufList.mBuffers[0].mData = srcBuffer;
                
            // client format is always linear PCM - so here we determine how many frames of lpcm
            // we can read/write given our buffer size
            UInt32 numFrames = 0; // initialized so the loop terminates cleanly if the guard below ever fails
            if (clientFormat.mBytesPerFrame > 0) // also silences a bogus analyzer divide-by-zero warning; mBytesPerFrame can't be 0 here (it is protected by an Assert)
                numFrames = clientFormat.BytesToFrames(bufferByteSize); // (bufferByteSize / clientFormat.mBytesPerFrame)

            XThrowIfError(ExtAudioFileRead(sourceFile, &numFrames, &fillBufList), "ExtAudioFileRead failed!");	
            if (!numFrames) {
                // this is our termination condition
                error = noErr;
                break;
            }
            sourceFrameOffset += numFrames;
            
            // this will block if we're interrupted
            Boolean wasInterrupted = ThreadStatePausedCheck();
            
            if ((error || wasInterrupted) && (false == canResumeFromInterruption)) {
                // this is our interruption termination condition:
                // an interruption has occurred but the audio converter cannot continue
                error = kMyAudioConverterErr_CannotResumeFromInterruptionError;
                break;
            }

            error = ExtAudioFileWrite(destinationFile, numFrames, &fillBufList);
            // if interrupted in the process of the write call, we must handle the errors appropriately
            if (error) {
                if (kExtAudioFileError_CodecUnavailableInputConsumed == error) {
                
                    printf("ExtAudioFileWrite kExtAudioFileError_CodecUnavailableInputConsumed error %ld\n", error);
                    
                    /*
                        Returned when ExtAudioFileWrite was interrupted. You must stop calling
                        ExtAudioFileWrite. If the underlying audio converter can resume after an
                        interruption (see kAudioConverterPropertyCanResumeFromInterruption), you must
                        wait for an EndInterruption notification from AudioSession, then activate the session
                        before resuming. In this situation, the buffer you provided to ExtAudioFileWrite was successfully
                        consumed and you may proceed to the next buffer
                    */
                    
                } else if (kExtAudioFileError_CodecUnavailableInputNotConsumed == error) {
                
                    printf("ExtAudioFileWrite kExtAudioFileError_CodecUnavailableInputNotConsumed error %ld\n", error);
                    
                    /*
                        Returned when ExtAudioFileWrite was interrupted. You must stop calling
                        ExtAudioFileWrite. If the underlying audio converter can resume after an
                        interruption (see kAudioConverterPropertyCanResumeFromInterruption), you must
                        wait for an EndInterruption notification from AudioSession, then activate the session
                        before resuming. In this situation, the buffer you provided to ExtAudioFileWrite was not
                        successfully consumed and you must try to write it again
                    */
                    
                    // seek back to last offset before last read so we can try again after the interruption
                    sourceFrameOffset -= numFrames;
                    XThrowIfError(ExtAudioFileSeek(sourceFile, sourceFrameOffset), "ExtAudioFileSeek failed!");
                    
                } else {
                    XThrowIfError(error, "ExtAudioFileWrite error!");
                }
            } // if
        } // while
	}
    catch (CAXException &e) {
		char buf[256];
		fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
        error = e.mError;
	}
    
    // close
    if (destinationFile) ExtAudioFileDispose(destinationFile);
    if (sourceFile) ExtAudioFileDispose(sourceFile);

    // transition thread state to kStateDone before continuing
    ThreadStateSetDone();
    
    return error;
}
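
A minimal usage sketch, not part of the original sample: DoConvertFile() asserts that it runs off the main thread, so a caller might dispatch it to a background queue. The wrapper name and the kAudioFormatMPEG4AAC / 44100.0 arguments are illustrative assumptions:

static void ConvertInBackground(CFURLRef sourceURL, CFURLRef destinationURL)
{
    // keep the URLs alive until the block has run
    CFRetain(sourceURL);
    CFRetain(destinationURL);
    dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
        // example arguments: AAC at 44.1 kHz; any supported format/rate would do
        OSStatus result = DoConvertFile(sourceURL, destinationURL, kAudioFormatMPEG4AAC, 44100.0);
        if (result != noErr)
            printf("DoConvertFile failed: %d\n", (int)result);
        CFRelease(sourceURL);
        CFRelease(destinationURL);
    });
}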
int AudioDecoderCoreAudio::open() {
    
    //Open the audio file.
    OSStatus err;

    /** This code block works with OS X 10.5+ only. DO NOT DELETE IT for now. */
    /*CFStringRef urlStr = CFStringCreateWithCharacters(0,
   				reinterpret_cast<const UniChar *>(
                //qurlStr.unicode()), qurlStr.size());
                m_filename.data()), m_filename.size());
                */
    CFStringRef urlStr = CFStringCreateWithCString(kCFAllocatorDefault, 
                                                   m_filename.c_str(), 
                                                   kCFStringEncodingUTF8);
                                                   //CFStringGetSystemEncoding());

    CFURLRef urlRef = CFURLCreateWithFileSystemPath(NULL, urlStr, kCFURLPOSIXPathStyle, false);
    err = ExtAudioFileOpenURL(urlRef, &m_audioFile);
    CFRelease(urlStr);
    CFRelease(urlRef);

    /** TODO: Use FSRef for compatibility with 10.4 Tiger. 
        Note that ExtAudioFileOpen() is deprecated above Tiger, so we must maintain
        both code paths if someone finishes this part of the code.
    FSRef fsRef;
    CFURLGetFSRef(reinterpret_cast<CFURLRef>(url.get()), &fsRef);
    err = ExtAudioFileOpen(&fsRef, &m_audioFile);
    */

	if (err != noErr)
	{
        std::cerr << "AudioDecoderCoreAudio: Error opening file." << std::endl;
		return AUDIODECODER_ERROR;
	}

    // get the input file format
    CAStreamBasicDescription inputFormat;
    UInt32 size = sizeof(inputFormat);
    err = ExtAudioFileGetProperty(m_audioFile, kExtAudioFileProperty_FileDataFormat, &size, &inputFormat);
	if (err != noErr)
	{
        std::cerr << "AudioDecoderCoreAudio: Error getting file format." << std::endl;
		return AUDIODECODER_ERROR;
	}    
    m_inputFormat = inputFormat;
    
	// create the output format
	CAStreamBasicDescription outputFormat;
    bzero(&outputFormat, sizeof(AudioStreamBasicDescription));
	outputFormat.mFormatID = kAudioFormatLinearPCM;
	outputFormat.mSampleRate = inputFormat.mSampleRate;
	outputFormat.mChannelsPerFrame = 2;
    outputFormat.mFormatFlags = kAudioFormatFlagsCanonical;
    //kAudioFormatFlagsCanonical means native-endian, packed float on Mac OS X,
    //but signed int on iOS instead.

    //Note: iPhone/iOS supposedly supports only signed integers, so the canonical
    //flags are overridden here:
    outputFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger;
	
    //Debugging:
    //printf ("Source File format: "); inputFormat.Print();
    //printf ("Dest File format: "); outputFormat.Print();


	/*
	switch(inputFormat.mBitsPerChannel) {
		case 16:
			outputFormat.mFormatFlags =  kAppleLosslessFormatFlag_16BitSourceData;
			break;
		case 20:
			outputFormat.mFormatFlags =  kAppleLosslessFormatFlag_20BitSourceData;
			break;
		case 24:
			outputFormat.mFormatFlags =  kAppleLosslessFormatFlag_24BitSourceData;
			break;
		case 32:
			outputFormat.mFormatFlags =  kAppleLosslessFormatFlag_32BitSourceData;
			break;
	}*/

    // get and set the client format - it should be lpcm
    CAStreamBasicDescription clientFormat = outputFormat; //We're always telling the OS to do the conversion to floats for us now
	clientFormat.mChannelsPerFrame = 2;
	clientFormat.mBytesPerFrame = sizeof(SAMPLE)*clientFormat.mChannelsPerFrame;
	clientFormat.mBitsPerChannel = sizeof(SAMPLE)*8; //16 for signed int, 32 for float;
	clientFormat.mFramesPerPacket = 1;
	clientFormat.mBytesPerPacket = clientFormat.mBytesPerFrame*clientFormat.mFramesPerPacket;
	clientFormat.mReserved = 0;
	m_clientFormat = clientFormat;
    size = sizeof(clientFormat);
    
    err = ExtAudioFileSetProperty(m_audioFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat);
	if (err != noErr)
	{
		//qDebug() << "SSCA: Error setting file property";
        std::cerr << "AudioDecoderCoreAudio: Error setting file property." << std::endl;
		return AUDIODECODER_ERROR;
	}
	
	//Set m_iChannels and m_iNumSamples;
	m_iChannels = clientFormat.NumberChannels();

	//get the total length in frames of the audio file - copypasta: http://discussions.apple.com/thread.jspa?threadID=2364583&tstart=47
	UInt32		dataSize;
	SInt64		totalFrameCount;		
	dataSize	= sizeof(totalFrameCount); //XXX: This looks sketchy to me - Albert
	err			= ExtAudioFileGetProperty(m_audioFile, kExtAudioFileProperty_FileLengthFrames, &dataSize, &totalFrameCount);
	if (err != noErr)
	{
        std::cerr << "AudioDecoderCoreAudio: Error getting number of frames." << std::endl;
		return AUDIODECODER_ERROR;
	}

	//
	// WORKAROUND for bug in ExtAudioFile
	//

	AudioConverterRef acRef;
	UInt32 acrsize = sizeof(AudioConverterRef);
	err = ExtAudioFileGetProperty(m_audioFile, kExtAudioFileProperty_AudioConverter, &acrsize, &acRef);
	//_ThrowExceptionIfErr(@"kExtAudioFileProperty_AudioConverter", err);

	AudioConverterPrimeInfo primeInfo;
	UInt32 piSize = sizeof(AudioConverterPrimeInfo);
	memset(&primeInfo, 0, piSize);
	err = AudioConverterGetProperty(acRef, kAudioConverterPrimeInfo, &piSize, &primeInfo);
	if (err != kAudioConverterErr_PropertyNotSupported) // only succeeds when decompressing
	{
		//_ThrowExceptionIfErr(@"kAudioConverterPrimeInfo", err);

		m_headerFrames = primeInfo.leadingFrames;
	}
	
	m_iNumSamples = (totalFrameCount/*-m_headerFrames*/)*m_iChannels;
	m_iSampleRate = inputFormat.mSampleRate;
	m_fDuration = m_iNumSamples / static_cast<float>(m_iSampleRate * m_iChannels);
	
    //Convert mono files into stereo
    if (inputFormat.NumberChannels() == 1)
    {
        SInt32 channelMap[2] = {0, 0}; // array size should match the number of output channels
        AudioConverterSetProperty(acRef, kAudioConverterChannelMap, 
                                    sizeof(channelMap), channelMap);
    }

	//Seek to position 0, which forces us to skip over all the header frames.
	//This makes sure we're ready to just let the Analyser rip and it'll
	//get the number of samples it expects (ie. no header frames).
	seek(0);

    return AUDIODECODER_OK;
}
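
The matching read path is not shown here. A hedged sketch of what a read() counterpart might look like, reusing the members set up above (m_audioFile, m_clientFormat, m_iChannels, SAMPLE); the method name and signature are assumptions, not the decoder's actual interface:

SInt64 AudioDecoderCoreAudio::read(SInt64 numSamples, SAMPLE *destination)
{
    // numSamples counts interleaved samples, so convert to frames first
    UInt32 numFrames = (UInt32)(numSamples / m_iChannels);

    AudioBufferList fillBufList;
    fillBufList.mNumberBuffers = 1;
    fillBufList.mBuffers[0].mNumberChannels = m_clientFormat.NumberChannels();
    fillBufList.mBuffers[0].mDataByteSize = numFrames * m_clientFormat.mBytesPerFrame;
    fillBufList.mBuffers[0].mData = destination;

    // ExtAudioFileRead decodes through the client format set in open()
    if (ExtAudioFileRead(m_audioFile, &numFrames, &fillBufList) != noErr)
        return 0;
    return (SInt64)numFrames * m_iChannels;
}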
Example #30
void	CAChannelMappingPlayer::SetupChannelMapping()
{
	delete mMapper;
	mMapper = NULL;
	
	const CAStreamBasicDescription &fileFormat = GetFile().GetClientDataFormat();
	CAStreamBasicDescription deviceFormat;
	UInt32 propertySize = sizeof(AudioStreamBasicDescription);
	
	XThrowIfError(AudioUnitGetProperty(
							GetOutputUnit(),
							kAudioUnitProperty_StreamFormat,
							kAudioUnitScope_Output,
							0,
							(void *)&deviceFormat,
							&propertySize), "get output device's format");

#if VERBOSE
	printf("CAChannelMappingPlayer::SetupChannelMapping: %ld-ch file, %ld-ch device\n",
		fileFormat.mChannelsPerFrame, deviceFormat.mChannelsPerFrame);
#endif

	if (fileFormat.mChannelsPerFrame <= deviceFormat.mChannelsPerFrame) {
		// no mapping needed, use output unit's default behavior 
		// (default stereo pair and speaker config from AMS)
#if VERBOSE
		printf("  using output unit's channel mapping\n");
#endif
		CAAudioFilePlayer::SetupChannelMapping();
	} else {
		// fewer device than file channels, mapping needed
		CAAudioChannelLayout fileLayout, deviceLayout;
		
#if VERBOSE
		printf("  using our own channel mapping\n");
#endif
		deviceFormat.mSampleRate = fileFormat.mSampleRate;
		deviceFormat.SetCanonical(deviceFormat.mChannelsPerFrame, false);	// force deinterleaved
		
		fileLayout = GetFile().GetFileChannelLayout();

		UInt32 layoutSize;
		Boolean writable;
		OSStatus err = AudioUnitGetPropertyInfo(
								GetOutputUnit(),
								kAudioUnitProperty_AudioChannelLayout,
								kAudioUnitScope_Input,
								0,
								&layoutSize,
								&writable);
		if (!err) {
			char *buf = (char *)malloc(layoutSize);
			err = AudioUnitGetProperty(
								GetOutputUnit(),
								kAudioUnitProperty_AudioChannelLayout,
								kAudioUnitScope_Input,
								0,
								buf,
								&layoutSize);
			if (!err)	// only adopt the layout if the fetch actually succeeded
				deviceLayout = CAAudioChannelLayout(reinterpret_cast<AudioChannelLayout *>(buf));
			free(buf);
		}
		mMapper = new CAChannelMapper(fileFormat, deviceFormat, &fileLayout, &deviceLayout);

		// give the output unit the same number of channels as in the device, 
		// since we'll be doing the mapping ourselves
		XThrowIfError(AudioUnitSetProperty(
								GetOutputUnit(),
								kAudioUnitProperty_StreamFormat,
								kAudioUnitScope_Input,
								0,
								(void *)&deviceFormat,
								sizeof(AudioStreamBasicDescription)), "set audio output format");
		
		XThrowIfError(mMapper->OpenMixer(fileFormat.mSampleRate), "open mixer");
		XThrowIfError(mMapper->ConfigureDownmix(), "configure downmix");
		
		AudioUnitConnection conn;
		conn.sourceAudioUnit = mMapper->GetMixer();
		conn.sourceOutputNumber = 0;
		conn.destInputNumber = 0;

		XThrowIfError(AudioUnitSetProperty(
								GetOutputUnit(),
								kAudioUnitProperty_MakeConnection,
								kAudioUnitScope_Global,
								0,
								(void *)&conn,
								sizeof(AudioUnitConnection)), "connect mixer to output unit");
		
		AURenderCallbackStruct input;
		input.inputProc = InputProc;
		input.inputProcRefCon = this;
		XThrowIfError(AudioUnitSetProperty(
									conn.sourceAudioUnit, 
									kAudioUnitProperty_SetRenderCallback, 
									kAudioUnitScope_Global,
									0,
									&input, 
									sizeof(input)), "connect input proc to mixer");
		// provide NO channel layout
//		mReadBuf = CABufferList::New("", fileFormat);
//		mReadBuf->AllocateBuffers(
	}
}
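
The InputProc registered above must have the AURenderCallback signature (and be a static member or free function to be assignable to inputProc). A skeleton for orientation only -- the real callback would pull frames from the file into ioData; this placeholder just renders silence:

OSStatus	CAChannelMappingPlayer::InputProc(void *inRefCon,
							AudioUnitRenderActionFlags *ioActionFlags,
							const AudioTimeStamp *inTimeStamp,
							UInt32 inBusNumber,
							UInt32 inNumberFrames,
							AudioBufferList *ioData)
{
	CAChannelMappingPlayer *This = static_cast<CAChannelMappingPlayer *>(inRefCon);
	// a real implementation would read inNumberFrames frames from This->GetFile()
	for (UInt32 i = 0; i < ioData->mNumberBuffers; ++i)
		memset(ioData->mBuffers[i].mData, 0, ioData->mBuffers[i].mDataByteSize);
	(void)This; (void)ioActionFlags; (void)inTimeStamp; (void)inBusNumber; (void)inNumberFrames;
	return noErr;
}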