Example #1
/*! @method ChangeStreamFormat */
ComponentResult		AUPannerBase::ChangeStreamFormat (
									AudioUnitScope						inScope,
									AudioUnitElement					inElement,
									const CAStreamBasicDescription & 	inPrevFormat,
									const CAStreamBasicDescription &	inNewFormat)
{
	if (inScope == kAudioUnitScope_Input && !InputChannelConfigIsSupported(inNewFormat.NumberChannels())) 
		return kAudioUnitErr_FormatNotSupported;
		
	if (inScope == kAudioUnitScope_Output && !OutputChannelConfigIsSupported(inNewFormat.NumberChannels())) 
		return kAudioUnitErr_FormatNotSupported;
		
	if (inNewFormat.NumberChannels() != inPrevFormat.NumberChannels())
		RemoveAudioChannelLayout(inScope, inElement);
		
	return AUBase::ChangeStreamFormat(inScope, inElement, inPrevFormat, inNewFormat);
}
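The two channel-config checks above are helper predicates defined elsewhere in AUPannerBase. As a rough illustration only, here is a minimal sketch of such a predicate, assuming a hypothetical panner that accepts mono or stereo input (the real SDK tables may differ):

// hypothetical sketch - accept 1 or 2 input channels
bool InputChannelConfigIsSupported (UInt32 inNumChannels)
{
	static const UInt32 kSupportedInputCounts[] = { 1, 2 };
	for (size_t i = 0; i < sizeof(kSupportedInputCounts) / sizeof(kSupportedInputCounts[0]); ++i)
		if (kSupportedInputCounts[i] == inNumChannels)
			return true;
	return false;
}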
Example #2
ComponentResult	ElCAJAS::ChangeStreamFormat(AudioUnitScope inScope,
        AudioUnitElement inElement,
        const CAStreamBasicDescription& inPrevFormat,
        const CAStreamBasicDescription& inNewFormat)
{
    if (inScope == kAudioUnitScope_Input) {        // input scope (== 1)
        int reqChans = inNewFormat.NumberChannels();
        if (reqChans > 2 || reqChans < 1)
            return kAudioUnitErr_FormatNotSupported;
        else
            return noErr;
    } else if (inScope == kAudioUnitScope_Output) {        // output scope (== 2)
        int reqChans = inNewFormat.NumberChannels();
        if (reqChans != 2)
            return kAudioUnitErr_FormatNotSupported;
        else
            return noErr;
    }
    return kAudioUnitErr_FormatNotSupported;
}
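For context, a host reaches a ChangeStreamFormat override like the ones above by setting the stream-format property on the unit. A minimal sketch, assuming an already-opened AudioUnit handle named unit:

// request a 2-channel, 44.1 kHz native-float format on input element 0
AudioStreamBasicDescription fmt = {0};
fmt.mSampleRate       = 44100.0;
fmt.mFormatID         = kAudioFormatLinearPCM;
fmt.mFormatFlags      = kAudioFormatFlagsNativeFloatPacked;
fmt.mChannelsPerFrame = 2;
fmt.mBitsPerChannel   = 32;
fmt.mFramesPerPacket  = 1;
fmt.mBytesPerFrame    = fmt.mBytesPerPacket = sizeof(Float32) * fmt.mChannelsPerFrame;

OSStatus err = AudioUnitSetProperty (unit, kAudioUnitProperty_StreamFormat,
                                     kAudioUnitScope_Input, 0, &fmt, sizeof(fmt));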
Example #3
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//	AUInstrumentBase::ValidFormat
//
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
bool				AUInstrumentBase::ValidFormat(	AudioUnitScope					inScope,
													AudioUnitElement				inElement,
													const CAStreamBasicDescription  & inNewFormat)
{	
		// if the AU supports this, then we should just let this go through to the Init call
	if (SupportedNumChannels (NULL)) 
		return MusicDeviceBase::ValidFormat(inScope, inElement, inNewFormat);

	bool isGood = MusicDeviceBase::ValidFormat (inScope, inElement, inNewFormat);
	if (!isGood) return false;
	
		// if we get to here, then the basic criterion is that the
		// number of channels cannot change on an existing bus
	AUIOElement *el = GetIOElement (inScope, inElement);
	return (el->GetStreamFormat().NumberChannels() == inNewFormat.NumberChannels()); 
}
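The SupportedNumChannels(NULL) call above returns non-zero only when a subclass publishes a channel-capabilities table. A sketch of such an override, with an assumed subclass name and an assumed mono/stereo capability list:

// hypothetical subclass advertising {1-in, 1-out} and {2-in, 2-out}
UInt32 MyInstrument::SupportedNumChannels (const AUChannelInfo** outInfo)
{
	static const AUChannelInfo sChannelInfo[] = { {1, 1}, {2, 2} };
	if (outInfo) *outInfo = sChannelInfo;
	return sizeof(sChannelInfo) / sizeof(sChannelInfo[0]);
}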
Example #4
OSStatus		CAAUProcessor::DoInitialisation (const CAStreamBasicDescription 	&inInputFormat,
												const CAStreamBasicDescription 		&inOutputFormat,
												UInt64								inNumInputSamples,
												UInt32 								inMaxFrames)
{
	OSStatus result;
	
	if (inNumInputSamples == 0 && IsOfflineAU())
		return kAudioUnitErr_InvalidOfflineRender;
		
	mNumInputSamples = inNumInputSamples;
	
		// first check that we can do this number of channels
	if (mUnit.CanDo (inInputFormat.NumberChannels(), inOutputFormat.NumberChannels()) == false)
		ca_require_noerr (result = kAudioUnitErr_FailedInitialization, home);
	
	// just uninitialise the AU as a matter of course
	ca_require_noerr (result = mUnit.Uninitialize(), home);

	ca_require_noerr (result = mUnit.SetFormat (kAudioUnitScope_Input, 0, inInputFormat), home); 
	ca_require_noerr (result = mUnit.SetFormat (kAudioUnitScope_Output, 0, inOutputFormat), home); 
	ca_require_noerr (result = SetMaxFramesPerRender (inMaxFrames), home);
	
#if !TARGET_OS_IPHONE
		// if we're any AU but an offline AU, we should tell it that we're processing offline
	if (!IsOfflineAU()) {
		UInt32 isOffline = (IsOfflineContext() ? 1 : 0);
			// we don't care whether this succeeds or fails, as many AUs don't care about this,
			// but for the ones that do, it's important that they are told their render context
		mUnit.SetProperty (kAudioUnitProperty_OfflineRender, kAudioUnitScope_Global, 0, &isOffline, sizeof(isOffline));
	} else {
			// tell the offline unit how many input samples we wish to process...
		mUnit.SetProperty (kAudioUnitOfflineProperty_InputSize,
												kAudioUnitScope_Global, 0,
												&mNumInputSamples, sizeof(mNumInputSamples));
	}
#endif
	
	ca_require_noerr (result = mUnit.Initialize(), home);

	ca_require_noerr (result = SetInputCallback (mUnit, mUserCallback), home);
	
	// finally reset our time stamp
	// the time stamp we use with the AU Render - only sample count is valid
	memset (&mRenderTimeStamp, 0, sizeof(mRenderTimeStamp));
	mRenderTimeStamp.mFlags = kAudioTimeStampSampleTimeValid;

	// now, if we're NOT an offline AU, preflighting is not required
	// if we are an offline AU, we should preflight.. an offline AU will tell us when its preflighting is done
	mPreflightDone = false;

	if (mPreflightABL) {
		delete mPreflightABL;
		mPreflightABL = NULL;
	}
	
	mPreflightABL = new AUOutputBL (inOutputFormat);

	mLastPercentReported = 0;
	
home:
	return result;
}
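The ca_require_noerr macro used throughout DoInitialisation comes from the SDK's CADebugMacros.h. Its effect is essentially the following simplified stand-in (minus the SDK's optional debug logging), shown here only to make the goto-cleanup flow explicit:

// jump to the cleanup label whenever the expression yields a non-zero status
#define my_require_noerr(errorCode, exceptionLabel) \
	do {                                            \
		if ((errorCode) != 0)                       \
			goto exceptionLabel;                    \
	} while (0)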
Example #5
OSStatus DoConvertFile(CFURLRef sourceURL, CFURLRef destinationURL, OSType outputFormat, Float64 outputSampleRate)
{
    ExtAudioFileRef sourceFile = 0;
    ExtAudioFileRef destinationFile = 0;
    Boolean         canResumeFromInterruption = true; // we can continue unless told otherwise
    OSStatus        error = noErr;
    
    // in this sample we should never be on the main thread here
    assert(![NSThread isMainThread]);
    
    // transition thread state to kStateRunning before continuing
    ThreadStateSetRunning();
    
    printf("DoConvertFile\n");
    
	try {
        CAStreamBasicDescription srcFormat, dstFormat;

        // open the source file
        XThrowIfError(ExtAudioFileOpenURL(sourceURL, &sourceFile), "ExtAudioFileOpenURL failed");
			
        // get the source data format
		UInt32 size = sizeof(srcFormat);
		XThrowIfError(ExtAudioFileGetProperty(sourceFile, kExtAudioFileProperty_FileDataFormat, &size, &srcFormat), "couldn't get source data format");
		
		printf("\nSource file format: "); srcFormat.Print();

        // setup the output file format
        dstFormat.mSampleRate = (outputSampleRate == 0 ? srcFormat.mSampleRate : outputSampleRate); // set sample rate
        if (outputFormat == kAudioFormatLinearPCM) {
            // if PCM was selected as the destination format, create a 16-bit int PCM file format description
            dstFormat.mFormatID = outputFormat;
            dstFormat.mChannelsPerFrame = srcFormat.NumberChannels();
            dstFormat.mBitsPerChannel = 16;
            dstFormat.mBytesPerPacket = dstFormat.mBytesPerFrame = 2 * dstFormat.mChannelsPerFrame;
            dstFormat.mFramesPerPacket = 1;
            dstFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger; // little-endian
        } else {
            // compressed format - need to set at least format, sample rate and channel fields for kAudioFormatProperty_FormatInfo
            dstFormat.mFormatID = outputFormat;
            dstFormat.mChannelsPerFrame =  (outputFormat == kAudioFormatiLBC ? 1 : srcFormat.NumberChannels()); // for iLBC num channels must be 1
            
            // use AudioFormat API to fill out the rest of the description
            size = sizeof(dstFormat);
            XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &size, &dstFormat), "couldn't create destination data format");
        }
        
        printf("\nDestination file format: "); dstFormat.Print();
        
        // create the destination file 
        XThrowIfError(ExtAudioFileCreateWithURL(destinationURL, kAudioFileCAFType, &dstFormat, NULL, kAudioFileFlags_EraseFile, &destinationFile), "ExtAudioFileCreateWithURL failed!");

        // set the client format - The format must be linear PCM (kAudioFormatLinearPCM)
        // You must set this in order to encode or decode a non-PCM file data format
        // You may set this on PCM files to specify the data format used in your calls to read/write
        CAStreamBasicDescription clientFormat;
        if (outputFormat == kAudioFormatLinearPCM) {
            clientFormat = dstFormat;
        } else {
            clientFormat.SetCanonical(srcFormat.NumberChannels(), true);
            clientFormat.mSampleRate = srcFormat.mSampleRate;
        }
        
        printf("\nClient data format: "); clientFormat.Print();
        printf("\n");
        
        size = sizeof(clientFormat);
        XThrowIfError(ExtAudioFileSetProperty(sourceFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat), "couldn't set source client format");
        
        size = sizeof(clientFormat);
        XThrowIfError(ExtAudioFileSetProperty(destinationFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat), "couldn't set destination client format");

        // can the audio converter (which in this case is owned by an ExtAudioFile object) resume conversion after an interruption?
        AudioConverterRef audioConverter;
                    
        size = sizeof(audioConverter);
        XThrowIfError(ExtAudioFileGetProperty(destinationFile, kExtAudioFileProperty_AudioConverter, &size, &audioConverter), "Couldn't get Audio Converter!");
        
        // this property may be queried at any time after construction of the audio converter (which in this case is owned by an ExtAudioFile object)
        // after setting the output format -- there's no clear reason to prefer construction time, interruption time, or potential resumption time but we prefer
        // construction time since it means less code to execute during or after interruption time
        UInt32 canResume = 0;
        size = sizeof(canResume);
        error = AudioConverterGetProperty(audioConverter, kAudioConverterPropertyCanResumeFromInterruption, &size, &canResume);
        if (noErr == error) {
            // we received a valid return value from the GetProperty call
            // if the property's value is 1, then the codec CAN resume work following an interruption
            // if the property's value is 0, then interruptions destroy the codec's state and we're done
            
            if (0 == canResume) canResumeFromInterruption = false;
            
            printf("Audio Converter %s continue after interruption!\n", (canResumeFromInterruption == 0 ? "CANNOT" : "CAN"));
        } else {
            // if the property is unimplemented (kAudioConverterErr_PropertyNotSupported, or paramErr returned in the case of PCM),
            // then the codec being used is not a hardware codec so we're not concerned about codec state
            // we are always going to be able to resume conversion after an interruption
            
            if (kAudioConverterErr_PropertyNotSupported == error) {
                printf("kAudioConverterPropertyCanResumeFromInterruption property not supported!\n");
            } else {
                printf("AudioConverterGetProperty kAudioConverterPropertyCanResumeFromInterruption result %ld\n", error);
            }
            
            error = noErr;
        }
        
        // set up buffers
        const UInt32 bufferByteSize = 32768;
        char srcBuffer[bufferByteSize];
        
        // keep track of the source file offset so we know where to reset the source for
        // reading if interrupted and input was not consumed by the audio converter
        SInt64 sourceFrameOffset = 0;
        
        //***** do the read and write - the conversion is done on and by the write call *****//
        printf("Converting...\n");
        while (1) {
        
            AudioBufferList fillBufList;
            fillBufList.mNumberBuffers = 1;
            fillBufList.mBuffers[0].mNumberChannels = clientFormat.NumberChannels();
            fillBufList.mBuffers[0].mDataByteSize = bufferByteSize;
            fillBufList.mBuffers[0].mData = srcBuffer;
                
            // client format is always linear PCM - so here we determine how many frames of lpcm
            // we can read/write given our buffer size
            UInt32 numFrames = 0;
            if (clientFormat.mBytesPerFrame > 0) // silences a bogus analyzer div-by-zero warning; mBytesPerFrame can't be 0 here and is protected by an Assert
                numFrames = clientFormat.BytesToFrames(bufferByteSize); // i.e. bufferByteSize / clientFormat.mBytesPerFrame

            XThrowIfError(ExtAudioFileRead(sourceFile, &numFrames, &fillBufList), "ExtAudioFileRead failed!");	
            if (!numFrames) {
                // this is our termination condition
                error = noErr;
                break;
            }
            sourceFrameOffset += numFrames;
            
            // this will block if we're interrupted
            Boolean wasInterrupted = ThreadStatePausedCheck();
            
            if ((error || wasInterrupted) && (false == canResumeFromInterruption)) {
                // this is our interruption termination condition
                // an interruption has occurred but the audio converter cannot continue
                error = kMyAudioConverterErr_CannotResumeFromInterruptionError;
                break;
            }

            error = ExtAudioFileWrite(destinationFile, numFrames, &fillBufList);
            // if interrupted in the process of the write call, we must handle the errors appropriately
            if (error) {
                if (kExtAudioFileError_CodecUnavailableInputConsumed == error) {
                
                    printf("ExtAudioFileWrite kExtAudioFileError_CodecUnavailableInputConsumed error %ld\n", error);
                    
                    /*
                        Returned when ExtAudioFileWrite was interrupted. You must stop calling
                        ExtAudioFileWrite. If the underlying audio converter can resume after an
                        interruption (see kAudioConverterPropertyCanResumeFromInterruption), you must
                        wait for an EndInterruption notification from AudioSession, then activate the session
                        before resuming. In this situation, the buffer you provided to ExtAudioFileWrite was successfully
                        consumed and you may proceed to the next buffer
                    */
                    
                } else if (kExtAudioFileError_CodecUnavailableInputNotConsumed == error) {
                
                    printf("ExtAudioFileWrite kExtAudioFileError_CodecUnavailableInputNotConsumed error %ld\n", error);
                    
                    /*
                        Returned when ExtAudioFileWrite was interrupted. You must stop calling
                        ExtAudioFileWrite. If the underlying audio converter can resume after an
                        interruption (see kAudioConverterPropertyCanResumeFromInterruption), you must
                        wait for an EndInterruption notification from AudioSession, then activate the session
                        before resuming. In this situation, the buffer you provided to ExtAudioFileWrite was not
                        successfully consumed and you must try to write it again
                    */
                    
                    // seek back to last offset before last read so we can try again after the interruption
                    sourceFrameOffset -= numFrames;
                    XThrowIfError(ExtAudioFileSeek(sourceFile, sourceFrameOffset), "ExtAudioFileSeek failed!");
                    
                } else {
                    XThrowIfError(error, "ExtAudioFileWrite error!");
                }
            } // if
        } // while
	}
    catch (CAXException &e) {
		char buf[256];
		fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
        error = e.mError;
	}
    
    // close
    if (destinationFile) ExtAudioFileDispose(destinationFile);
    if (sourceFile) ExtAudioFileDispose(sourceFile);

    // transition thread state to kStateDone before continuing
    ThreadStateSetDone();
    
    return error;
}
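DoConvertFile leans on XThrowIfError from the SDK's CAXException.h. Roughly, that helper behaves like the sketch below (simplified; the real macro also captures the status into a local before testing it):

// throw on any non-zero OSStatus, carrying a description of the failing operation
inline void XThrowIfErrorSketch (OSStatus error, const char *operation)
{
	if (error != noErr)
		throw CAXException (operation, error);
}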
Example #6
int AudioDecoderCoreAudio::open() {
    
    //Open the audio file.
    OSStatus err;

    /** This code block works with OS X 10.5+ only. DO NOT DELETE IT for now. */
    /*CFStringRef urlStr = CFStringCreateWithCharacters(0,
   				reinterpret_cast<const UniChar *>(
                //qurlStr.unicode()), qurlStr.size());
                m_filename.data()), m_filename.size());
                */
    CFStringRef urlStr = CFStringCreateWithCString(kCFAllocatorDefault, 
                                                   m_filename.c_str(), 
                                                   kCFStringEncodingUTF8);
                                                   //CFStringGetSystemEncoding());

    CFURLRef urlRef = CFURLCreateWithFileSystemPath(NULL, urlStr, kCFURLPOSIXPathStyle, false);
    err = ExtAudioFileOpenURL(urlRef, &m_audioFile);
    CFRelease(urlStr);
    CFRelease(urlRef);

    /** TODO: Use FSRef for compatibility with 10.4 Tiger. 
        Note that ExtAudioFileOpen() is deprecated above Tiger, so we must maintain
        both code paths if someone finishes this part of the code.
    FSRef fsRef;
    CFURLGetFSRef(reinterpret_cast<CFURLRef>(url.get()), &fsRef);
    err = ExtAudioFileOpen(&fsRef, &m_audioFile);
    */

	if (err != noErr)
	{
        std::cerr << "AudioDecoderCoreAudio: Error opening file." << std::endl;
		return AUDIODECODER_ERROR;
	}

    // get the input file format
    CAStreamBasicDescription inputFormat;
    UInt32 size = sizeof(inputFormat);
    err = ExtAudioFileGetProperty(m_audioFile, kExtAudioFileProperty_FileDataFormat, &size, &inputFormat);
	if (err != noErr)
	{
        std::cerr << "AudioDecoderCoreAudio: Error getting file format." << std::endl;
		return AUDIODECODER_ERROR;
	}    
    m_inputFormat = inputFormat;
    
	// create the output format
	CAStreamBasicDescription outputFormat;
    bzero(&outputFormat, sizeof(AudioStreamBasicDescription));
	outputFormat.mFormatID = kAudioFormatLinearPCM;
	outputFormat.mSampleRate = inputFormat.mSampleRate;
	outputFormat.mChannelsPerFrame = 2;
    outputFormat.mFormatFlags = kAudioFormatFlagsCanonical;
    //kAudioFormatFlagsCanonical means native endian, float, packed on Mac OS X,
    //but signed int for iOS instead.

    //Note iPhone/iOS only supports signed integers supposedly; this assignment
    //deliberately overrides the canonical flags set just above:
    outputFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger;
	
    //Debugging:
    //printf ("Source File format: "); inputFormat.Print();
    //printf ("Dest File format: "); outputFormat.Print();


	/*
	switch(inputFormat.mBitsPerChannel) {
		case 16:
			outputFormat.mFormatFlags =  kAppleLosslessFormatFlag_16BitSourceData;
			break;
		case 20:
			outputFormat.mFormatFlags =  kAppleLosslessFormatFlag_20BitSourceData;
			break;
		case 24:
			outputFormat.mFormatFlags =  kAppleLosslessFormatFlag_24BitSourceData;
			break;
		case 32:
			outputFormat.mFormatFlags =  kAppleLosslessFormatFlag_32BitSourceData;
			break;
	}*/

    // get and set the client format - it should be lpcm
    CAStreamBasicDescription clientFormat = outputFormat; //We're always telling the OS to do the conversion to floats for us now
	clientFormat.mChannelsPerFrame = 2;
	clientFormat.mBytesPerFrame = sizeof(SAMPLE)*clientFormat.mChannelsPerFrame;
	clientFormat.mBitsPerChannel = sizeof(SAMPLE)*8; //16 for signed int, 32 for float;
	clientFormat.mFramesPerPacket = 1;
	clientFormat.mBytesPerPacket = clientFormat.mBytesPerFrame*clientFormat.mFramesPerPacket;
	clientFormat.mReserved = 0;
	m_clientFormat = clientFormat;
    size = sizeof(clientFormat);
    
    err = ExtAudioFileSetProperty(m_audioFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat);
	if (err != noErr)
	{
		//qDebug() << "SSCA: Error setting file property";
        std::cerr << "AudioDecoderCoreAudio: Error setting file property." << std::endl;
		return AUDIODECODER_ERROR;
	}
	
	//Set m_iChannels and m_iNumSamples;
	m_iChannels = clientFormat.NumberChannels();

	//get the total length in frames of the audio file - copypasta: http://discussions.apple.com/thread.jspa?threadID=2364583&tstart=47
	UInt32		dataSize;
	SInt64		totalFrameCount;		
	dataSize	= sizeof(totalFrameCount); //XXX: This looks sketchy to me - Albert
	err			= ExtAudioFileGetProperty(m_audioFile, kExtAudioFileProperty_FileLengthFrames, &dataSize, &totalFrameCount);
	if (err != noErr)
	{
        std::cerr << "AudioDecoderCoreAudio: Error getting number of frames." << std::endl;
		return AUDIODECODER_ERROR;
	}

      //
      // WORKAROUND for bug in ExtFileAudio
      //
      
      AudioConverterRef acRef;
      UInt32 acrsize=sizeof(AudioConverterRef);
      err = ExtAudioFileGetProperty(m_audioFile, kExtAudioFileProperty_AudioConverter, &acrsize, &acRef);
      //_ThrowExceptionIfErr(@"kExtAudioFileProperty_AudioConverter", err);

      AudioConverterPrimeInfo primeInfo;
      UInt32 piSize=sizeof(AudioConverterPrimeInfo);
      memset(&primeInfo, 0, piSize);
      err = AudioConverterGetProperty(acRef, kAudioConverterPrimeInfo, &piSize, &primeInfo);
      if(err != kAudioConverterErr_PropertyNotSupported) // Only if decompressing
      {
         //_ThrowExceptionIfErr(@"kAudioConverterPrimeInfo", err);
         
         m_headerFrames=primeInfo.leadingFrames;
      }
	
	m_iNumSamples = (totalFrameCount/*-m_headerFrames*/)*m_iChannels;
	m_iSampleRate = inputFormat.mSampleRate;
	m_fDuration = m_iNumSamples / static_cast<float>(m_iSampleRate * m_iChannels);
	
    //Convert mono files into stereo
    if (inputFormat.NumberChannels() == 1)
    {
        SInt32 channelMap[2] = {0, 0}; // array size should match the number of output channels
        AudioConverterSetProperty(acRef, kAudioConverterChannelMap, 
                                    sizeof(channelMap), channelMap);
    }

	//Seek to position 0, which forces us to skip over all the header frames.
	//This makes sure we're ready to just let the Analyser rip and it'll
	//get the number of samples it expects (ie. no header frames).
	seek(0);

    return AUDIODECODER_OK;
}
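Once open() has installed the client format, a decoder's read path just pulls interleaved frames through ExtAudioFileRead. A minimal sketch of such a helper; the free-function shape and the SAMPLE type (from the surrounding project) are assumptions:

// hypothetical sketch: pull up to numFrames interleaved frames into buffer
UInt32 ReadFrames (ExtAudioFileRef audioFile, const CAStreamBasicDescription &clientFormat,
                   SAMPLE *buffer, UInt32 numFrames)
{
    AudioBufferList fillBufList;
    fillBufList.mNumberBuffers = 1;
    fillBufList.mBuffers[0].mNumberChannels = clientFormat.NumberChannels();
    fillBufList.mBuffers[0].mDataByteSize   = numFrames * clientFormat.mBytesPerFrame;
    fillBufList.mBuffers[0].mData           = buffer;

    UInt32 framesRead = numFrames;
    if (ExtAudioFileRead (audioFile, &framesRead, &fillBufList) != noErr)
        return 0;          // treat a read error as end-of-stream in this sketch
    return framesRead;     // 0 signals end of file
}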
Example #7
// soundsource overrides
int SoundSourceCoreAudio::open() {
    //m_file.open(QIODevice::ReadOnly);

    //Open the audio file.
    OSStatus err;

	//QUrl blah(m_qFilename);
    QString qurlStr = m_qFilename;//blah.toString();
    qDebug() << qurlStr;

    /** This code block works with OS X 10.5+ only. DO NOT DELETE IT for now. */
    CFStringRef urlStr = CFStringCreateWithCharacters(0,
   				reinterpret_cast<const UniChar *>(
                qurlStr.unicode()), qurlStr.size());
    CFURLRef urlRef = CFURLCreateWithFileSystemPath(NULL, urlStr, kCFURLPOSIXPathStyle, false);
    err = ExtAudioFileOpenURL(urlRef, &m_audioFile);
    CFRelease(urlStr);
    CFRelease(urlRef);

    /** TODO: Use FSRef for compatibility with 10.4 Tiger.
        Note that ExtAudioFileOpen() is deprecated above Tiger, so we must maintain
        both code paths if someone finishes this part of the code.
    FSRef fsRef;
    CFURLGetFSRef(reinterpret_cast<CFURLRef>(url.get()), &fsRef);
    err = ExtAudioFileOpen(&fsRef, &m_audioFile);
    */

	if (err != noErr)
	{
		qDebug() << "SSCA: Error opening file.";
		return ERR;
	}

    // get the input file format
    CAStreamBasicDescription inputFormat;
    UInt32 size = sizeof(inputFormat);
    err = ExtAudioFileGetProperty(m_audioFile, kExtAudioFileProperty_FileDataFormat, &size, &inputFormat);
	if (err != noErr)
	{
		qDebug() << "SSCA: Error getting file format";
		return ERR;
	}
	// copy the format only after ExtAudioFileGetProperty has filled it in
	m_inputFormat = inputFormat;

    //Debugging:
    //printf ("Source File format: "); inputFormat.Print();
    //printf ("Dest File format: "); outputFormat.Print();


	// create the output format
	CAStreamBasicDescription outputFormat;
    bzero(&outputFormat, sizeof(AudioStreamBasicDescription));
	outputFormat.mFormatID = kAudioFormatLinearPCM;
	outputFormat.mSampleRate = inputFormat.mSampleRate;
	outputFormat.mChannelsPerFrame = 2;
	outputFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger;

	/*
	switch(inputFormat.mBitsPerChannel) {
		case 16:
			outputFormat.mFormatFlags =  kAppleLosslessFormatFlag_16BitSourceData;
			break;
		case 20:
			outputFormat.mFormatFlags =  kAppleLosslessFormatFlag_20BitSourceData;
			break;
		case 24:
			outputFormat.mFormatFlags =  kAppleLosslessFormatFlag_24BitSourceData;
			break;
		case 32:
			outputFormat.mFormatFlags =  kAppleLosslessFormatFlag_32BitSourceData;
			break;
	}*/

    // get and set the client format - it should be lpcm
    CAStreamBasicDescription clientFormat = (inputFormat.mFormatID == kAudioFormatLinearPCM ? inputFormat : outputFormat);
	clientFormat.mBytesPerPacket = 4;
	clientFormat.mFramesPerPacket = 1;
	clientFormat.mBytesPerFrame = 4;
	clientFormat.mChannelsPerFrame = 2;
	clientFormat.mBitsPerChannel = 16;
	clientFormat.mReserved = 0;
	m_clientFormat = clientFormat;
    size = sizeof(clientFormat);

    err = ExtAudioFileSetProperty(m_audioFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat);
	if (err != noErr)
	{
		qDebug() << "SSCA: Error setting file property";
		return ERR;
	}

	//Set m_iChannels and m_samples;
	m_iChannels = clientFormat.NumberChannels();

	//get the total length in frames of the audio file - copypasta: http://discussions.apple.com/thread.jspa?threadID=2364583&tstart=47
	UInt32		dataSize;
	SInt64		totalFrameCount;
	dataSize	= sizeof(totalFrameCount); //XXX: This looks sketchy to me - Albert
	err			= ExtAudioFileGetProperty(m_audioFile, kExtAudioFileProperty_FileLengthFrames, &dataSize, &totalFrameCount);
	if (err != noErr)
	{
		qDebug() << "SSCA: Error getting number of frames";
		return ERR;
	}

      //
      // WORKAROUND for bug in ExtFileAudio
      //

      AudioConverterRef acRef;
      UInt32 acrsize=sizeof(AudioConverterRef);
      err = ExtAudioFileGetProperty(m_audioFile, kExtAudioFileProperty_AudioConverter, &acrsize, &acRef);
      //_ThrowExceptionIfErr(@"kExtAudioFileProperty_AudioConverter", err);

      AudioConverterPrimeInfo primeInfo;
      UInt32 piSize=sizeof(AudioConverterPrimeInfo);
      memset(&primeInfo, 0, piSize);
      err = AudioConverterGetProperty(acRef, kAudioConverterPrimeInfo, &piSize, &primeInfo);
      if(err != kAudioConverterErr_PropertyNotSupported) // Only if decompressing
      {
         //_ThrowExceptionIfErr(@"kAudioConverterPrimeInfo", err);

         m_headerFrames=primeInfo.leadingFrames;
      }

	m_samples = (totalFrameCount/*-m_headerFrames*/)*m_iChannels;
	m_iDuration = m_samples / (inputFormat.mSampleRate * m_iChannels);
	m_iSampleRate = inputFormat.mSampleRate;
	qDebug() << m_samples << totalFrameCount << m_iChannels;

	//Seek to position 0, which forces us to skip over all the header frames.
	//This makes sure we're ready to just let the Analyser rip and it'll
	//get the number of samples it expects (ie. no header frames).
	seek(0);

    return OK;
}
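The seek(0) call at the end relies on the m_headerFrames value captured above. A sketch of how such a seek might compensate for codec priming frames; the exact method signature and the sample-vs-frame convention are assumptions about this class:

// hypothetical sketch: seek by interleaved-sample position, offset by priming frames
long SoundSourceCoreAudio::seek (long filepos)
{
    // filepos counts interleaved samples; ExtAudioFileSeek counts frames
    OSStatus err = ExtAudioFileSeek (m_audioFile, filepos / m_iChannels + m_headerFrames);
    if (err != noErr)
        qDebug() << "SSCA: Error seeking to" << filepos;
    return filepos;
}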
Example #8
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// This currently will only work for cbr formats
OSStatus OALBuffer::ConvertDataForBuffer(void *inData, UInt32 inDataSize, UInt32	inDataFormat, UInt32 inDataSampleRate)
{
#if LOG_VERBOSE
	DebugMessageN5("OALBuffer::ConvertDataForBuffer() - OALBuffer:inData:inDataSize:inDataFormat:inDataSampleRate = %ld:%p:%ld:%ld:%ld", (long int) mSelfToken, inData, (long int) inDataSize, (long int) inDataFormat, (long int) inDataSampleRate);
#endif    
	OSStatus					result = noErr;

    try {

        AudioConverterRef			converter;
        CAStreamBasicDescription	destFormat;
        UInt32						framesOfSource = 0;

        if (inData == NULL)
            throw ((OSStatus) AL_INVALID_OPERATION);

        result = FillInASBD(mPreConvertedDataFormat, inDataFormat, inDataSampleRate);
            THROW_RESULT

        if (mPreConvertedDataFormat.NumberChannels() == 1)
            mPreConvertedDataFormat.mFormatFlags |= kAudioFormatFlagIsNonInterleaved; 
                    
        destFormat.mChannelsPerFrame = mPreConvertedDataFormat.NumberChannels();
        destFormat.mSampleRate = mPreConvertedDataFormat.mSampleRate;
        destFormat.mFormatID = kAudioFormatLinearPCM;
        
        if (mPreConvertedDataFormat.NumberChannels() == 1)
            destFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved;
        else
            destFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked; // leave stereo data interleaved, and an AC will be used for deinterleaving later on
        
        destFormat.mFramesPerPacket = 1;	
        destFormat.mBitsPerChannel = sizeof (Float32) * 8;	
        destFormat.mBytesPerPacket = sizeof (Float32) * destFormat.NumberChannels();	
        destFormat.mBytesPerFrame = sizeof (Float32) * destFormat.NumberChannels();
        
        result = FillInASBD(mDataFormat, inDataFormat, UInt32(destFormat.mSampleRate));
            THROW_RESULT
            
        result = AudioConverterNew(&mPreConvertedDataFormat, &destFormat, &converter);
            THROW_RESULT

		framesOfSource = inDataSize / mPreConvertedDataFormat.mBytesPerFrame; // THIS ONLY WORKS FOR CBR FORMATS
			
       	UInt32 dataSize = framesOfSource * sizeof(Float32) * destFormat.NumberChannels();
        
		// note: mDataSize is deliberately NOT updated here; the branches below
		// compare the old value against dataSize to decide whether mData must be reallocated

        if (mData != NULL)
        {
            if (mDataSize != dataSize)
            {
                mDataSize = dataSize;
                void *newDataPtr = realloc(mData, mDataSize);
                if (newDataPtr == NULL)
                    throw ((OSStatus) AL_INVALID_OPERATION);
                    
                mData = (UInt8 *) newDataPtr;
            }		
        }
        else
        {
            mDataSize = dataSize;
            mData = (UInt8 *) malloc (mDataSize);
            if (mData == NULL)
                throw ((OSStatus) AL_INVALID_OPERATION);
        }

        if (mData != NULL)
        {
			result = AudioConverterConvertBuffer(converter, inDataSize, inData, &mDataSize, mData);
			if (result == noErr)
            {
                mDataFormat.SetFrom(destFormat);
				if (mPreConvertedDataFormat.NumberChannels() == 1)
					mDataHasBeenConverted = true;
				else
					mDataHasBeenConverted = false;
			}
        }
        
        AudioConverterDispose(converter);
    }
    catch (OSStatus     result) {
        return (result);
    }
    catch (...) {
        result = (OSStatus) AL_INVALID_OPERATION;
    }
    
	return (result);
}
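FillInASBD maps an OpenAL buffer format token onto an AudioStreamBasicDescription. A minimal sketch covering only the four core OpenAL PCM formats (the real OALBuffer helper also handles extension formats, so treat this as an approximation):

// hypothetical sketch: derive an interleaved integer-PCM ASBD from an AL format token
OSStatus FillInASBDSketch (CAStreamBasicDescription &sbd, UInt32 alFormat, UInt32 sampleRate)
{
    UInt32 bits, channels;
    switch (alFormat) {
        case AL_FORMAT_MONO8:    bits = 8;  channels = 1; break;
        case AL_FORMAT_MONO16:   bits = 16; channels = 1; break;
        case AL_FORMAT_STEREO8:  bits = 8;  channels = 2; break;
        case AL_FORMAT_STEREO16: bits = 16; channels = 2; break;
        default: return (OSStatus) AL_INVALID_VALUE;
    }

    sbd.mSampleRate       = sampleRate;
    sbd.mFormatID         = kAudioFormatLinearPCM;
    sbd.mFormatFlags      = kAudioFormatFlagIsPacked;   // 8-bit AL data is unsigned
    if (bits == 16)
        sbd.mFormatFlags |= kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian;
    sbd.mChannelsPerFrame = channels;
    sbd.mBitsPerChannel   = bits;
    sbd.mFramesPerPacket  = 1;
    sbd.mBytesPerFrame    = channels * bits / 8;
    sbd.mBytesPerPacket   = sbd.mBytesPerFrame;
    return noErr;
}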
Example #9
File: main.cpp Project: ebakan/SMS
void MakeSimpleGraph (AUGraph &theGraph, CAAudioUnit &fileAU, CAStreamBasicDescription &fileFormat, AudioFileID audioFile)
{
	XThrowIfError (NewAUGraph (&theGraph), "NewAUGraph");
	
	CAComponentDescription cd;

	// output node
	cd.componentType = kAudioUnitType_Output;
	cd.componentSubType = kAudioUnitSubType_DefaultOutput;
	cd.componentManufacturer = kAudioUnitManufacturer_Apple;

	AUNode outputNode;
	XThrowIfError (AUGraphAddNode (theGraph, &cd, &outputNode), "AUGraphAddNode");
	
	// file AU node
	AUNode fileNode;
	cd.componentType = kAudioUnitType_Generator;
	cd.componentSubType = kAudioUnitSubType_AudioFilePlayer;
	
	XThrowIfError (AUGraphAddNode (theGraph, &cd, &fileNode), "AUGraphAddNode");
	
	// connect & setup
	XThrowIfError (AUGraphOpen (theGraph), "AUGraphOpen");
	
	// grab the AudioUnit instance from the file node
	AudioUnit anAU;
	XThrowIfError (AUGraphNodeInfo(theGraph, fileNode, NULL, &anAU), "AUGraphNodeInfo");
	
	fileAU = CAAudioUnit (fileNode, anAU);

// prepare the file AU for playback
// set its output channels
	XThrowIfError (fileAU.SetNumberChannels (kAudioUnitScope_Output, 0, fileFormat.NumberChannels()), "SetNumberChannels");

// set the output sample rate of the file AU to be the same as the file:
	XThrowIfError (fileAU.SetSampleRate (kAudioUnitScope_Output, 0, fileFormat.mSampleRate), "SetSampleRate");

// load in the file 
	XThrowIfError (fileAU.SetProperty(kAudioUnitProperty_ScheduledFileIDs, 
						kAudioUnitScope_Global, 0, &audioFile, sizeof(audioFile)), "SetScheduleFile");


	XThrowIfError (AUGraphConnectNodeInput (theGraph, fileNode, 0, outputNode, 0), "AUGraphConnectNodeInput");

// At this point we make sure we have the file player AU initialized;
// this also propagates the output format of the AU to the output unit
	XThrowIfError (AUGraphInitialize (theGraph), "AUGraphInitialize");
	
	// work around a race condition in the file player AU
	usleep (10 * 1000);

// if we have a surround file, then we should try to tell the output AU what the order of the channels will be
	if (fileFormat.NumberChannels() > 2) {
		UInt32 layoutSize = 0;
		OSStatus err;
		XThrowIfError (err = AudioFileGetPropertyInfo (audioFile, kAudioFilePropertyChannelLayout, &layoutSize, NULL),
								"kAudioFilePropertyChannelLayout");
		
		if (!err && layoutSize) {
			char* layout = new char[layoutSize];
			
			err = AudioFileGetProperty(audioFile, kAudioFilePropertyChannelLayout, &layoutSize, layout);
			XThrowIfError (err, "Get Layout From AudioFile");
			
			// ok, now get the output AU and set its layout
			XThrowIfError (AUGraphNodeInfo(theGraph, outputNode, NULL, &anAU), "AUGraphNodeInfo");
			
			err = AudioUnitSetProperty (anAU, kAudioUnitProperty_AudioChannelLayout, 
							kAudioUnitScope_Input, 0, layout, layoutSize);
			XThrowIfError (err, "kAudioUnitProperty_AudioChannelLayout");
			
			delete [] layout;
		}
	}
}
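A sketch of how a caller might drive MakeSimpleGraph, assuming theURL is a CFURLRef for an existing audio file (scheduling the file region on the player AU is left out, as the original sample does that separately):

// hypothetical calling sketch
AudioFileID audioFile;
XThrowIfError (AudioFileOpenURL (theURL, kAudioFileReadPermission, 0, &audioFile), "AudioFileOpenURL");

// ask the file for its data format so the graph can be matched to it
CAStreamBasicDescription fileFormat;
UInt32 propSize = sizeof(fileFormat);
XThrowIfError (AudioFileGetProperty (audioFile, kAudioFilePropertyDataFormat, &propSize, &fileFormat),
               "kAudioFilePropertyDataFormat");

AUGraph theGraph;
CAAudioUnit fileAU;
MakeSimpleGraph (theGraph, fileAU, fileFormat, audioFile);

XThrowIfError (AUGraphStart (theGraph), "AUGraphStart");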