Example #1
// _______________________________________________________________________________________
//
void	CAAudioFile::UpdateClientMaxPacketSize()
{
	LOG_FUNCTION("CAAudioFile::UpdateClientMaxPacketSize", "%p", this);
	mFrame0Offset = 0;
	if (mConverter != NULL) {
		AudioConverterPropertyID property = (mMode == kReading) ?
			kAudioConverterPropertyMaximumOutputPacketSize :
			kAudioConverterPropertyMaximumInputPacketSize;

		UInt32 propertySize = sizeof(UInt32);
		XThrowIfError(AudioConverterGetProperty(mConverter, property, &propertySize, &mClientMaxPacketSize),
			"get audio converter's maximum packet size");

		if (mFileDataFormat.mBitsPerChannel == 0) {
			AudioConverterPrimeInfo primeInfo;
			propertySize = sizeof(primeInfo);
			OSStatus err = AudioConverterGetProperty(mConverter, kAudioConverterPrimeInfo, &propertySize, &primeInfo);
			if (err == noErr)
				mFrame0Offset = primeInfo.leadingFrames;
#if VERBOSE_CONVERTER
			printf("kAudioConverterPrimeInfo: err = %ld, leadingFrames = %ld\n", err, mFrame0Offset);
#endif
		}
	} else {
		mClientMaxPacketSize = mFileMaxPacketSize;
	}
}
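For context, a hedged sketch of how the client maximum packet size computed above is typically consumed when sizing read buffers. The names clientMaxPacketSize and kPacketsPerRead are assumptions for illustration, not CAAudioFile members:

// Hypothetical sketch: size a packet buffer from the converter's client
// maximum packet size. Reading VBR data needs a buffer large enough for the
// worst-case packet, which is exactly what this value guarantees.
enum { kPacketsPerRead = 64 };                            // assumed constant
UInt32 bufferBytes = clientMaxPacketSize * kPacketsPerRead;
void *packetBuffer = malloc(bufferBytes);                 // holds up to kPacketsPerRead packets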
Example #2
static int ffat_update_ctx(AVCodecContext *avctx)
{
    ATDecodeContext *at = avctx->priv_data;
    AudioStreamBasicDescription format;
    UInt32 size = sizeof(format);
    if (!AudioConverterGetProperty(at->converter,
                                   kAudioConverterCurrentInputStreamDescription,
                                   &size, &format)) {
        if (format.mSampleRate)
            avctx->sample_rate = format.mSampleRate;
        avctx->channels = format.mChannelsPerFrame;
        avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
        avctx->frame_size = format.mFramesPerPacket;
    }

    if (!AudioConverterGetProperty(at->converter,
                                   kAudioConverterCurrentOutputStreamDescription,
                                   &size, &format)) {
        format.mSampleRate = avctx->sample_rate;
        format.mChannelsPerFrame = avctx->channels;
        AudioConverterSetProperty(at->converter,
                                  kAudioConverterCurrentOutputStreamDescription,
                                  size, &format);
    }

    if (!AudioConverterGetPropertyInfo(at->converter, kAudioConverterOutputChannelLayout,
                                       &size, NULL) && size) {
        AudioChannelLayout *layout = av_malloc(size);
        uint64_t layout_mask = 0;
        int i;
        if (!layout)
            return AVERROR(ENOMEM);
        AudioConverterGetProperty(at->converter, kAudioConverterOutputChannelLayout,
                                  &size, layout);
        if (!(layout = ffat_convert_layout(layout, &size)))
            return AVERROR(ENOMEM);
        for (i = 0; i < layout->mNumberChannelDescriptions; i++) {
            int id = ffat_get_channel_id(layout->mChannelDescriptions[i].mChannelLabel);
            if (id < 0)
                goto done;
            if (layout_mask & (1ULL << id))
                goto done;
            layout_mask |= 1ULL << id;
            layout->mChannelDescriptions[i].mChannelFlags = i; // Abusing flags as index
        }
        avctx->channel_layout = layout_mask;
        qsort(layout->mChannelDescriptions, layout->mNumberChannelDescriptions,
              sizeof(AudioChannelDescription), &ffat_compare_channel_descriptions);
        for (i = 0; i < layout->mNumberChannelDescriptions; i++)
            at->channel_map[i] = layout->mChannelDescriptions[i].mChannelFlags;
done:
        av_free(layout);
    }

    if (!avctx->frame_size)
        avctx->frame_size = 2048;

    return 0;
}
Example #3
// Some audio formats have a magic cookie associated with them which is required to decompress the audio data.
// When converting audio, a magic cookie may be returned by the Audio Converter so that it can be stored along with
// the output data -- it can then be passed back to the Audio Converter at a later time as required.
static void WriteCookie(AudioConverterRef converter, AudioFileID destinationFileID)
{
    // grab the cookie from the converter and write it to the destination file
	UInt32 cookieSize = 0;
	OSStatus error = AudioConverterGetPropertyInfo(converter, kAudioConverterCompressionMagicCookie, &cookieSize, NULL);
    
    // if there is an error here, then the format doesn't have a cookie - this is perfectly fine as some formats do not
	if (noErr == error && 0 != cookieSize) {
		char* cookie = new char [cookieSize];
		
		error = AudioConverterGetProperty(converter, kAudioConverterCompressionMagicCookie, &cookieSize, cookie);
        if (noErr == error) {
            error = AudioFileSetProperty(destinationFileID, kAudioFilePropertyMagicCookieData, cookieSize, cookie);
            if (noErr == error) {
                printf("Writing magic cookie to destination file: %ld\n", cookieSize);
            } else {
                printf("Even though some formats have cookies, some files don't take them and that's OK\n");
            }
        } else {
            printf("Could not Get kAudioConverterCompressionMagicCookie from Audio Converter!\n");
        }
        
		delete [] cookie;
	}
}
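The DoConvertFile example later in this listing calls a ReadCookie() helper that is not shown. A plausible counterpart to WriteCookie, sketched under the same conventions -- an assumption, not the original implementation:

static void ReadCookie(AudioFileID sourceFileID, AudioConverterRef converter)
{
    // grab the cookie from the source file and, if present, hand it to the converter
    UInt32 cookieSize = 0;
    OSStatus error = AudioFileGetPropertyInfo(sourceFileID, kAudioFilePropertyMagicCookieData, &cookieSize, NULL);

    // if there is an error here, the file simply has no cookie - that's fine
    if (noErr == error && 0 != cookieSize) {
        char* cookie = new char [cookieSize];

        error = AudioFileGetProperty(sourceFileID, kAudioFilePropertyMagicCookieData, &cookieSize, cookie);
        if (noErr == error) {
            // the converter needs the cookie before it can decode the compressed data
            error = AudioConverterSetProperty(converter, kAudioConverterDecompressionMagicCookie, cookieSize, cookie);
            if (noErr != error)
                printf("Could not set kAudioConverterDecompressionMagicCookie on the Audio Converter!\n");
        } else {
            printf("Could not get kAudioFilePropertyMagicCookieData from source file!\n");
        }

        delete [] cookie;
    }
}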
Example #4
    void
    AACEncode::setBitrate(int bitrate)
    {
        if(m_bitrate != bitrate) {
            m_converterMutex.lock();
            UInt32 br = bitrate;
            AudioConverterDispose(m_audioConverter);

            const OSType subtype = kAudioFormatMPEG4AAC;
            AudioClassDescription requestedCodecs[2] = {
                {
                    kAudioEncoderComponentType,
                    subtype,
                    kAppleSoftwareAudioCodecManufacturer
                },
                {
                    kAudioEncoderComponentType,
                    subtype,
                    kAppleHardwareAudioCodecManufacturer
                }
            };
            OSStatus result = AudioConverterNewSpecific(&m_in, &m_out, 2, requestedCodecs, &m_audioConverter);
            if(result == noErr) {
                result = AudioConverterSetProperty(m_audioConverter, kAudioConverterEncodeBitRate, sizeof(br), &br);
            }
            UInt32 propSize = sizeof(br);
            
            if(result == noErr) {
                AudioConverterGetProperty(m_audioConverter, kAudioConverterEncodeBitRate, &propSize, &br);
                m_bitrate = br;
            }
            m_converterMutex.unlock();
        }
    }
Example #5
// _______________________________________________________________________________________
//
void	CAAudioFile::FlushEncoder()
{
	if (mConverter != NULL) {
		mFinishingEncoding = true;
		WritePacketsFromCallback(WriteInputProc, this);
		mFinishingEncoding = false;

		// get priming info from converter, set it on the file
		if (mFileDataFormat.mBitsPerChannel == 0) {
			UInt32 propertySize;
			OSStatus err;
			AudioConverterPrimeInfo primeInfo;
			propertySize = sizeof(primeInfo);

			err = AudioConverterGetProperty(mConverter, kAudioConverterPrimeInfo, &propertySize, &primeInfo);
			if (err == noErr) {
				AudioFilePacketTableInfo pti;
				propertySize = sizeof(pti);
				err = AudioFileGetProperty(mAudioFile, kAudioFilePropertyPacketTableInfo, &propertySize, &pti);
				if (err == noErr) {
//printf("old packet table info: %qd valid, %ld priming, %ld remainder\n", pti.mNumberValidFrames, pti.mPrimingFrames, pti.mRemainderFrames);
					UInt64 totalFrames = pti.mNumberValidFrames + pti.mPrimingFrames + pti.mRemainderFrames;
					pti.mPrimingFrames = primeInfo.leadingFrames;
					pti.mRemainderFrames = primeInfo.trailingFrames;
					pti.mNumberValidFrames = totalFrames - pti.mPrimingFrames - pti.mRemainderFrames;
//printf("new packet table info: %qd valid, %ld priming, %ld remainder\n", pti.mNumberValidFrames, pti.mPrimingFrames, pti.mRemainderFrames);
					XThrowIfError(AudioFileSetProperty(mAudioFile, kAudioFilePropertyPacketTableInfo, sizeof(pti), &pti), "couldn't set packet table info on audio file");
				}
			}
		}
	}
}
Example #6
// _______________________________________________________________________________________
//
CFArrayRef  CAAudioFile::GetConverterConfig()
{
	CFArrayRef plist;
	UInt32 propertySize = sizeof(plist);
	XThrowIfError(AudioConverterGetProperty(mConverter, kAudioConverterPropertySettings, &propertySize, &plist), "get converter property settings");
	return plist;
}
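A usage sketch for GetConverterConfig -- an assumption, not from CAAudioFile: converter properties that hand out CF objects transfer ownership to the caller, so the returned plist should be released when done with it.

CAAudioFile file;                   // assumed instance, opened elsewhere
CFArrayRef config = file.GetConverterConfig();
CFShow(config);                     // dump the converter settings for inspection
CFRelease(config);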
Example #7
// setup conversion from Squeak to device frame format, or vice-versa.
// requires: stereo for output, stereo or mono for input.
//
static int Stream_setFormat(Stream *s, int frameCount, int sampleRate, int stereo)
{
  int nChannels=	1 + stereo;
  AudioStreamBasicDescription imgFmt, devFmt;
  UInt32 sz= sizeof(devFmt);

  if (0 == s->direction) nChannels= 2;	// insist

  if (checkError(AudioDeviceGetProperty(s->id, 0, s->direction,
					kAudioDevicePropertyStreamFormat,
					&sz, &devFmt),
		 "GetProperty", "StreamFormat"))
    return 0;

  debugf("stream %p[%d] device format:\n", s, s->direction);  dumpFormat(&devFmt);

  imgFmt.mSampleRate	   = sampleRate;
  imgFmt.mFormatID	   = kAudioFormatLinearPCM;
#if defined(WORDS_BIGENDIAN)
  imgFmt.mFormatFlags	   = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsBigEndian;
#else
  imgFmt.mFormatFlags	   = kLinearPCMFormatFlagIsSignedInteger;
#endif
  imgFmt.mBytesPerPacket   = SqueakFrameSize / (3 - nChannels);
  imgFmt.mFramesPerPacket  = 1;
  imgFmt.mBytesPerFrame    = SqueakFrameSize / (3 - nChannels);
  imgFmt.mChannelsPerFrame = nChannels;
  imgFmt.mBitsPerChannel   = 16;

  debugf("stream %p[%d] image format:\n", s, s->direction);  dumpFormat(&imgFmt);

  if (s->direction) // input
    {
      if (checkError(AudioConverterNew(&devFmt, &imgFmt, &s->converter), "AudioConverter", "New"))
	return 0;
      sz= sizeof(s->cvtBufSize);
      s->cvtBufSize= 512 * devFmt.mBytesPerFrame;
      if (checkError(AudioConverterGetProperty(s->converter, kAudioConverterPropertyCalculateOutputBufferSize,
					       &sz, &s->cvtBufSize), 
		     "GetProperty", "OutputBufferSize"))
	return 0;
    }
  else // output
    {
      if (checkError(AudioConverterNew(&imgFmt, &devFmt, &s->converter), "AudioConverter", "New"))
	return 0;
    }

  s->channels=   nChannels;
  s->sampleRate= sampleRate;
  s->imgBufSize= SqueakFrameSize * nChannels * frameCount;

  frameCount= max(frameCount, 512 * sampleRate / devFmt.mSampleRate);

  s->buffer= Buffer_new((s->direction ? DeviceFrameSize : SqueakFrameSize) * nChannels * frameCount * 2);

  debugf("stream %p[%d] sound buffer size %d/%d (%d)\n", s, s->direction, s->imgBufSize, s->buffer->size, frameCount);

  return 1;
}
Example #8
UInt32 AudioConverterX::getEncodeBitRate()
{
    UInt32 value;
    UInt32 size = sizeof value;
    CHECKCA(AudioConverterGetProperty(m_converter.get(),
        kAudioConverterEncodeBitRate, &size, &value));
    return value;
}
Example #9
UInt32 AudioConverterX::getSampleRateConverterComplexity()
{
    UInt32 value;
    UInt32 size = sizeof value;
    CHECKCA(AudioConverterGetProperty(m_converter.get(),
            kAudioConverterSampleRateConverterComplexity, &size, &value));
    return value;
}
Example #10
UInt32 AudioConverterX::getCodecQuality()
{
    UInt32 result;
    UInt32 size = sizeof result;
    CHECKCA(AudioConverterGetProperty(m_converter.get(),
                kAudioConverterCodecQuality, &size, &result));
    return result;
}
Example #11
UInt32 AudioConverterX::getMaximumOutputPacketSize()
{
    UInt32 result;
    UInt32 size = sizeof result;
    CHECKCA(AudioConverterGetProperty(m_converter.get(),
                kAudioConverterPropertyMaximumOutputPacketSize,
                &size, &result));
    return result;
}
Example #12
UInt32 AudioConverterX::getBitRateControlMode()
{
    UInt32 result;
    UInt32 size = sizeof result;
    CHECKCA(AudioConverterGetProperty(m_converter.get(),
                kAudioCodecPropertyBitRateControlMode,
                &size, &result));
    return result;
}
Example #13
UInt32 AudioConverterX::getSoundQualityForVBR()
{
    UInt32 result;
    UInt32 size = sizeof result;
    CHECKCA(AudioConverterGetProperty(m_converter.get(),
                kAudioCodecPropertySoundQualityForVBR,
                &size, &result));
    return result;
}
Example #14
UInt32 AudioConverterX::getPrimeMethod()
{
    UInt32 value;
    UInt32 size = sizeof value;
    CHECKCA(AudioConverterGetProperty(m_converter.get(),
                                      kAudioConverterPrimeMethod,
                                      &size, &value));
    return value;
}
Example #15
AudioStreamBasicDescription AudioConverterX::getOutputStreamDescription()
{
    AudioStreamBasicDescription result;
    UInt32 size = sizeof(result);
    CHECKCA(AudioConverterGetProperty(m_converter.get(),
                kAudioConverterCurrentOutputStreamDescription,
                &size, &result));
    return result;
}
Example #16
AudioConverterPrimeInfo AudioConverterX::getPrimeInfo()
{
    AudioConverterPrimeInfo result;
    UInt32 size = sizeof(AudioConverterPrimeInfo);
    CHECKCA(AudioConverterGetProperty(m_converter.get(),
                                      kAudioConverterPrimeInfo,
                                      &size, &result));
    return result;
}
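The fixed-size getters above all share one shape. A template helper could collapse them; a sketch under the same assumptions (the CHECKCA macro and the m_converter smart pointer from AudioConverterX), not part of the original class:

template <typename T>
T getScalarProperty(AudioConverterRef converter, AudioConverterPropertyID prop)
{
    T value;
    UInt32 size = sizeof(T);
    CHECKCA(AudioConverterGetProperty(converter, prop, &size, &value));
    return value;
}
// e.g. UInt32 bitrate = getScalarProperty<UInt32>(m_converter.get(), kAudioConverterEncodeBitRate);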
Example #17
std::vector<AudioValueRange> AudioConverterX::getApplicableEncodeBitRates()
{
    UInt32 size;
    Boolean writable;
    CHECKCA(AudioConverterGetPropertyInfo(m_converter.get(),
                kAudioConverterApplicableEncodeBitRates, &size, &writable));
    std::vector<AudioValueRange> vec(size / sizeof(AudioValueRange));
    CHECKCA(AudioConverterGetProperty(m_converter.get(),
            kAudioConverterApplicableEncodeBitRates, &size, vec.data()));
    return vec;
}
Example #18
std::vector<uint8_t> AudioConverterX::getCompressionMagicCookie()
{
    UInt32 size;
    Boolean writable;
    CHECKCA(AudioConverterGetPropertyInfo(m_converter.get(),
                kAudioConverterCompressionMagicCookie, &size, &writable));
    std::vector<uint8_t> vec(size / sizeof(uint8_t));
    CHECKCA(AudioConverterGetProperty(m_converter.get(),
            kAudioConverterCompressionMagicCookie, &size, vec.data()));
    return vec;
}
Example #19
std::shared_ptr<AudioChannelLayout> AudioConverterX::getOutputChannelLayout()
{
    UInt32 size;
    Boolean writable;
    CHECKCA(AudioConverterGetPropertyInfo(m_converter.get(),
                kAudioConverterOutputChannelLayout, &size, &writable));
    std::shared_ptr<AudioChannelLayout> acl(
        static_cast<AudioChannelLayout*>(std::malloc(size)),
        std::free);
    CHECKCA(AudioConverterGetProperty(m_converter.get(),
            kAudioConverterOutputChannelLayout, &size, acl.get()));
    return acl;
}
Example #20
static void ffat_update_ctx(AVCodecContext *avctx)
{
    ATDecodeContext *at = avctx->priv_data;
    UInt32 size = sizeof(unsigned);
    AudioConverterPrimeInfo prime_info;
    AudioStreamBasicDescription out_format;

    AudioConverterGetProperty(at->converter,
                              kAudioConverterPropertyMaximumOutputPacketSize,
                              &size, &at->pkt_size);

    if (at->pkt_size <= 0)
        at->pkt_size = 1024 * 50;

    size = sizeof(prime_info);

    if (!AudioConverterGetProperty(at->converter,
                                   kAudioConverterPrimeInfo,
                                   &size, &prime_info)) {
        avctx->initial_padding = prime_info.leadingFrames;
    }

    size = sizeof(out_format);
    if (!AudioConverterGetProperty(at->converter,
                                   kAudioConverterCurrentOutputStreamDescription,
                                   &size, &out_format)) {
        if (out_format.mFramesPerPacket)
            avctx->frame_size = out_format.mFramesPerPacket;
        if (out_format.mBytesPerPacket && avctx->codec_id == AV_CODEC_ID_ILBC)
            avctx->block_align = out_format.mBytesPerPacket;
    }

    at->frame_size = avctx->frame_size;
    if (avctx->codec_id == AV_CODEC_ID_PCM_MULAW ||
        avctx->codec_id == AV_CODEC_ID_PCM_ALAW) {
        at->pkt_size *= 1024;
        avctx->frame_size *= 1024;
    }
}
Example #21
// Sets the packet table containing information about the number of valid frames in a file and where they begin and end
// for the file types that support this information.
// Calling this function makes sure we write out the priming and remainder details to the destination file
static void WritePacketTableInfo(AudioConverterRef converter, AudioFileID destinationFileID)
{
    UInt32 isWritable;
    UInt32 dataSize;
    OSStatus error = AudioFileGetPropertyInfo(destinationFileID, kAudioFilePropertyPacketTableInfo, &dataSize, &isWritable);
    if (noErr == error && isWritable) {

        AudioConverterPrimeInfo primeInfo;
        dataSize = sizeof(primeInfo);

        // retrieve the leadingFrames and trailingFrames information from the converter,
        error = AudioConverterGetProperty(converter, kAudioConverterPrimeInfo, &dataSize, &primeInfo);
        if (noErr == error) {
            // we have some priming information to write out to the destination file
            /* The total number of packets in the file times the frames per packet (or counting each packet's
               frames individually for a variable frames per packet format) minus mPrimingFrames, minus
               mRemainderFrames, should equal mNumberValidFrames.
            */
            AudioFilePacketTableInfo pti;
            dataSize = sizeof(pti);
            error = AudioFileGetProperty(destinationFileID, kAudioFilePropertyPacketTableInfo, &dataSize, &pti);
            if (noErr == error) {
                // there's priming to write out to the file
                UInt64 totalFrames = pti.mNumberValidFrames + pti.mPrimingFrames + pti.mRemainderFrames; // get the total number of frames from the output file
                printf("Total number of frames from output file: %lld\n", totalFrames);
                
                pti.mPrimingFrames = primeInfo.leadingFrames;
                pti.mRemainderFrames = primeInfo.trailingFrames;
                pti.mNumberValidFrames = totalFrames - pti.mPrimingFrames - pti.mRemainderFrames;
            
                error = AudioFileSetProperty(destinationFileID, kAudioFilePropertyPacketTableInfo, sizeof(pti), &pti);
                if (noErr == error) {
                    printf("Writing packet table information to destination file: %ld\n", sizeof(pti));
                    printf("     Total valid frames: %lld\n", pti.mNumberValidFrames);
                    printf("         Priming frames: %ld\n", pti.mPrimingFrames);
                    printf("       Remainder frames: %ld\n", pti.mRemainderFrames);
                } else {
                    printf("Some audio files can't contain packet table information and that's OK\n");
                }
            } else {
                 printf("Getting kAudioFilePropertyPacketTableInfo error: %ld\n", error);
            }
        } else {
            printf("No kAudioConverterPrimeInfo available and that's OK\n");
        }
    } else {
        printf("GetPropertyInfo for kAudioFilePropertyPacketTableInfo error: %ld, isWritable: %ld\n", error, isWritable);
    }
}
Example #22
// Write output channel layout to destination file
static void WriteDestinationChannelLayout(AudioConverterRef converter, AudioFileID sourceFileID, AudioFileID destinationFileID)
{
    UInt32 layoutSize = 0;
    bool layoutFromConverter = true;
    
    OSStatus error = AudioConverterGetPropertyInfo(converter, kAudioConverterOutputChannelLayout, &layoutSize, NULL);
        
    // if the Audio Converter doesn't have a layout see if the input file does
    if (error || 0 == layoutSize) {
        error = AudioFileGetPropertyInfo(sourceFileID, kAudioFilePropertyChannelLayout, &layoutSize, NULL);
        layoutFromConverter = false;
    }
    
    if (noErr == error && 0 != layoutSize) {
        char* layout = new char[layoutSize];
        
        if (layoutFromConverter) {
            error = AudioConverterGetProperty(converter, kAudioConverterOutputChannelLayout, &layoutSize, layout);
            if (error) printf("Could not Get kAudioConverterOutputChannelLayout from Audio Converter!\n");
        } else {
            error = AudioFileGetProperty(sourceFileID, kAudioFilePropertyChannelLayout, &layoutSize, layout);
            if (error) printf("Could not Get kAudioFilePropertyChannelLayout from source file!\n");
        }
        
        if (noErr == error) {
            error = AudioFileSetProperty(destinationFileID, kAudioFilePropertyChannelLayout, layoutSize, layout);
            if (noErr == error) {
                printf("Writing channel layout to destination file: %ld\n", layoutSize);
            } else {
                printf("Even though some formats have layouts, some files don't take them and that's OK\n");
            }
        }
        
        delete [] layout;
    }
}
Example #23
/***********************************************************************
 * hb_work_encCoreAudio_init
 ***********************************************************************
 *
 **********************************************************************/
int encCoreAudioInit( hb_work_object_t * w, hb_job_t * job, enum AAC_MODE mode )
{
    hb_work_private_t * pv = calloc( 1, sizeof( hb_work_private_t ) );
    hb_audio_t * audio = w->audio;
    AudioStreamBasicDescription input, output;
    UInt32 tmp, tmpsiz = sizeof( tmp );
    OSStatus err;

    w->private_data = pv;
    pv->job = job;

    // pass the number of channels used into the private work data
    pv->nchannels = HB_AMIXDOWN_GET_DISCRETE_CHANNEL_COUNT( audio->config.out.mixdown );

    bzero( &input, sizeof( AudioStreamBasicDescription ) );
    input.mSampleRate = ( Float64 ) audio->config.out.samplerate;
    input.mFormatID = kAudioFormatLinearPCM;
    input.mFormatFlags = kLinearPCMFormatFlagIsFloat | kAudioFormatFlagsNativeEndian;
    input.mBytesPerPacket = 4 * pv->nchannels;
    input.mFramesPerPacket = 1;
    input.mBytesPerFrame = input.mBytesPerPacket * input.mFramesPerPacket;
    input.mChannelsPerFrame = pv->nchannels;
    input.mBitsPerChannel = 32;

    bzero( &output, sizeof( AudioStreamBasicDescription ) );
    switch ( mode ) 
    {
        case AAC_MODE_HE:
            output.mFormatID = kAudioFormatMPEG4AAC_HE;
            break;
        case AAC_MODE_LC:
        default:
            output.mFormatID = kAudioFormatMPEG4AAC;
            break;
    }
    output.mSampleRate = ( Float64 ) audio->config.out.samplerate;
    output.mChannelsPerFrame = pv->nchannels;
    // let CoreAudio decide the rest...

    // initialise encoder
    err = AudioConverterNew( &input, &output, &pv->converter );
    if( err != noErr)
    {
        // Retry without the samplerate
        bzero( &output, sizeof( AudioStreamBasicDescription ) );
        switch ( mode )
        {
            case AAC_MODE_HE:
                output.mFormatID = kAudioFormatMPEG4AAC_HE;
                break;
            case AAC_MODE_LC:
            default:
                output.mFormatID = kAudioFormatMPEG4AAC;
                break;
        }
        output.mChannelsPerFrame = pv->nchannels;

        err = AudioConverterNew( &input, &output, &pv->converter );

        if( err != noErr)
        {
            hb_log( "Error creating an AudioConverter err=%"PRId64" %"PRIu64, (int64_t)err, (uint64_t)output.mBytesPerFrame );
            *job->die = 1;
            return 0;
        }
    }

    if( ( audio->config.out.mixdown == HB_AMIXDOWN_6CH ) && ( audio->config.in.codec == HB_ACODEC_AC3) )
    {
        SInt32 channelMap[6] = { 2, 1, 3, 4, 5, 0 };
        AudioConverterSetProperty( pv->converter, kAudioConverterChannelMap,
                                   sizeof( channelMap ), channelMap );
    }

    // set encoder quality to maximum
    tmp = kAudioConverterQuality_Max;
    AudioConverterSetProperty( pv->converter, kAudioConverterCodecQuality,
                               sizeof( tmp ), &tmp );

    // set encoder bitrate control mode to constrained variable
    tmp = kAudioCodecBitRateControlMode_VariableConstrained;
    AudioConverterSetProperty( pv->converter, kAudioCodecPropertyBitRateControlMode,
                               sizeof( tmp ), &tmp );

    // get available bitrates
    AudioValueRange *bitrates;
    ssize_t bitrateCounts;
    err = AudioConverterGetPropertyInfo( pv->converter, kAudioConverterApplicableEncodeBitRates,
                                         &tmpsiz, NULL);
    bitrates = malloc( tmpsiz );
    err = AudioConverterGetProperty( pv->converter, kAudioConverterApplicableEncodeBitRates,
                                     &tmpsiz, bitrates);
    bitrateCounts = tmpsiz / sizeof( AudioValueRange );

    // set bitrate
    tmp = audio->config.out.bitrate * 1000;
    if( tmp < bitrates[0].mMinimum )
        tmp = bitrates[0].mMinimum;
    if( tmp > bitrates[bitrateCounts-1].mMinimum )
        tmp = bitrates[bitrateCounts-1].mMinimum;
    free( bitrates );
    if( tmp != audio->config.out.bitrate * 1000 )
        hb_log( "encca_aac: sanitizing track %d audio bitrate %d to %"PRIu32"", 
                audio->config.out.track, audio->config.out.bitrate, tmp/1000 );
    AudioConverterSetProperty( pv->converter, kAudioConverterEncodeBitRate,
                               sizeof( tmp ), &tmp );

    // get real input
    tmpsiz = sizeof( input );
    AudioConverterGetProperty( pv->converter,
                               kAudioConverterCurrentInputStreamDescription,
                               &tmpsiz, &input );
    // get real output
    tmpsiz = sizeof( output );
    AudioConverterGetProperty( pv->converter,
                               kAudioConverterCurrentOutputStreamDescription,
                               &tmpsiz, &output );

    // set sizes
    pv->isamplesiz  = input.mBytesPerPacket;
    pv->isamples    = output.mFramesPerPacket;
    pv->osamplerate = output.mSampleRate;

    // get maximum output size
    tmpsiz = sizeof( tmp );
    AudioConverterGetProperty( pv->converter,
                               kAudioConverterPropertyMaximumOutputPacketSize,
                               &tmpsiz, &tmp );
    pv->omaxpacket = tmp;

    // get magic cookie (elementary stream descriptor)
    tmp = HB_CONFIG_MAX_SIZE;
    AudioConverterGetProperty( pv->converter,
                               kAudioConverterCompressionMagicCookie,
                               &tmp, w->config->aac.bytes );
    // CoreAudio returns a complete ESDS, but we only need
    // the DecoderSpecific info.
    UInt8* buffer = NULL;
    ReadESDSDescExt(w->config->aac.bytes, &buffer, &tmpsiz, 0);
    w->config->aac.length = tmpsiz;
    memmove( w->config->aac.bytes, buffer,
             w->config->aac.length );

    pv->list = hb_list_init();
    pv->buf = NULL;

    return 0;
}
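encCoreAudioInit computes pv->omaxpacket, but the encoder work loop is not part of this listing. A hedged sketch of how a maximum output packet size typically feeds AudioConverterFillComplexBuffer; inInputDataProc is an assumed callback name, not HandBrake's actual code:

// Hypothetical sketch: request one packet of encoded output, using the
// maximum output packet size to guarantee the buffer is large enough.
AudioBufferList obuf;
UInt32 npackets = 1;
obuf.mNumberBuffers = 1;
obuf.mBuffers[0].mNumberChannels = pv->nchannels;
obuf.mBuffers[0].mDataByteSize = pv->omaxpacket;
obuf.mBuffers[0].mData = malloc( pv->omaxpacket );
err = AudioConverterFillComplexBuffer( pv->converter, inInputDataProc, pv,
                                       &npackets, &obuf, NULL );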
Example #24
OSStatus DoConvertFile(CFURLRef sourceURL, CFURLRef destinationURL, OSType outputFormat, Float64 outputSampleRate)
{
    ExtAudioFileRef sourceFile = 0;
    ExtAudioFileRef destinationFile = 0;
    Boolean         canResumeFromInterruption = true; // we can continue unless told otherwise
    OSStatus        error = noErr;
    
    // in this sample we should never be on the main thread here
    assert(![NSThread isMainThread]);
    
    // transition thread state to kStateRunning before continuing
    ThreadStateSetRunning();
    
    printf("DoConvertFile\n");
    
	try {
        CAStreamBasicDescription srcFormat, dstFormat;

        // open the source file
        XThrowIfError(ExtAudioFileOpenURL(sourceURL, &sourceFile), "ExtAudioFileOpenURL failed");
			
        // get the source data format
		UInt32 size = sizeof(srcFormat);
		XThrowIfError(ExtAudioFileGetProperty(sourceFile, kExtAudioFileProperty_FileDataFormat, &size, &srcFormat), "couldn't get source data format");
		
		printf("\nSource file format: "); srcFormat.Print();

        // setup the output file format
        dstFormat.mSampleRate = (outputSampleRate == 0 ? srcFormat.mSampleRate : outputSampleRate); // set sample rate
        if (outputFormat == kAudioFormatLinearPCM) {
            // if PCM was selected as the destination format, create a 16-bit int PCM file format description
            dstFormat.mFormatID = outputFormat;
            dstFormat.mChannelsPerFrame = srcFormat.NumberChannels();
            dstFormat.mBitsPerChannel = 16;
            dstFormat.mBytesPerPacket = dstFormat.mBytesPerFrame = 2 * dstFormat.mChannelsPerFrame;
            dstFormat.mFramesPerPacket = 1;
            dstFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger; // little-endian
        } else {
            // compressed format - need to set at least format, sample rate and channel fields for kAudioFormatProperty_FormatInfo
            dstFormat.mFormatID = outputFormat;
            dstFormat.mChannelsPerFrame =  (outputFormat == kAudioFormatiLBC ? 1 : srcFormat.NumberChannels()); // for iLBC num channels must be 1
            
            // use AudioFormat API to fill out the rest of the description
            size = sizeof(dstFormat);
            XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &size, &dstFormat), "couldn't create destination data format");
        }
        
        printf("\nDestination file format: "); dstFormat.Print();
        
        // create the destination file 
        XThrowIfError(ExtAudioFileCreateWithURL(destinationURL, kAudioFileCAFType, &dstFormat, NULL, kAudioFileFlags_EraseFile, &destinationFile), "ExtAudioFileCreateWithURL failed!");

        // set the client format - The format must be linear PCM (kAudioFormatLinearPCM)
        // You must set this in order to encode or decode a non-PCM file data format
        // You may set this on PCM files to specify the data format used in your calls to read/write
        CAStreamBasicDescription clientFormat;
        if (outputFormat == kAudioFormatLinearPCM) {
            clientFormat = dstFormat;
        } else {
            clientFormat.SetCanonical(srcFormat.NumberChannels(), true);
            clientFormat.mSampleRate = srcFormat.mSampleRate;
        }
        
        printf("\nClient data format: "); clientFormat.Print();
        printf("\n");
        
        size = sizeof(clientFormat);
        XThrowIfError(ExtAudioFileSetProperty(sourceFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat), "couldn't set source client format");
        
        size = sizeof(clientFormat);
        XThrowIfError(ExtAudioFileSetProperty(destinationFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat), "couldn't set destination client format");

        // can the audio converter (which in this case is owned by an ExtAudioFile object) resume conversion after an interruption?
        AudioConverterRef audioConverter;
                    
        size = sizeof(audioConverter);
        XThrowIfError(ExtAudioFileGetProperty(destinationFile, kExtAudioFileProperty_AudioConverter, &size, &audioConverter), "Couldn't get Audio Converter!");
        
        // this property may be queried at any time after construction of the audio converter (which in this case is owned by an ExtAudioFile object)
        // after setting the output format -- there's no clear reason to prefer construction time, interruption time, or potential resumption time but we prefer
        // construction time since it means less code to execute during or after interruption time
        UInt32 canResume = 0;
        size = sizeof(canResume);
        error = AudioConverterGetProperty(audioConverter, kAudioConverterPropertyCanResumeFromInterruption, &size, &canResume);
        if (noErr == error) {
            // we received a valid return value from the GetProperty call
            // if the property's value is 1, then the codec CAN resume work following an interruption
            // if the property's value is 0, then interruptions destroy the codec's state and we're done
            
            if (0 == canResume) canResumeFromInterruption = false;
            
            printf("Audio Converter %s continue after interruption!\n", (canResumeFromInterruption == 0 ? "CANNOT" : "CAN"));
        } else {
            // if the property is unimplemented (kAudioConverterErr_PropertyNotSupported, or paramErr returned in the case of PCM),
            // then the codec being used is not a hardware codec so we're not concerned about codec state
            // we are always going to be able to resume conversion after an interruption
            
            if (kAudioConverterErr_PropertyNotSupported == error) {
                printf("kAudioConverterPropertyCanResumeFromInterruption property not supported!\n");
            } else {
                printf("AudioConverterGetProperty kAudioConverterPropertyCanResumeFromInterruption result %ld\n", error);
            }
            
            error = noErr;
        }
        
        // set up buffers
        UInt32 bufferByteSize = 32768;
        char srcBuffer[bufferByteSize];
        
        // keep track of the source file offset so we know where to reset the source for
        // reading if interrupted and input was not consumed by the audio converter
        SInt64 sourceFrameOffset = 0;
        
        //***** do the read and write - the conversion is done on and by the write call *****//
        printf("Converting...\n");
        while (1) {
        
            AudioBufferList fillBufList;
            fillBufList.mNumberBuffers = 1;
            fillBufList.mBuffers[0].mNumberChannels = clientFormat.NumberChannels();
            fillBufList.mBuffers[0].mDataByteSize = bufferByteSize;
            fillBufList.mBuffers[0].mData = srcBuffer;
                
            // client format is always linear PCM - so here we determine how many frames of lpcm
            // we can read/write given our buffer size
            UInt32 numFrames = 0;
            if (clientFormat.mBytesPerFrame > 0) // rids bogus analyzer div by zero warning mBytesPerFrame can't be 0 and is protected by an Assert
                numFrames = clientFormat.BytesToFrames(bufferByteSize); // (bufferByteSize / clientFormat.mBytesPerFrame);

            XThrowIfError(ExtAudioFileRead(sourceFile, &numFrames, &fillBufList), "ExtAudioFileRead failed!");	
            if (!numFrames) {
                // this is our termination condition
                error = noErr;
                break;
            }
            sourceFrameOffset += numFrames;
            
            // this will block if we're interrupted
            Boolean wasInterrupted = ThreadStatePausedCheck();
            
            if ((error || wasInterrupted) && (false == canResumeFromInterruption)) {
                // this is our interruption termination condition
                // an interruption has occurred but the audio converter cannot continue
                error = kMyAudioConverterErr_CannotResumeFromInterruptionError;
                break;
            }

            error = ExtAudioFileWrite(destinationFile, numFrames, &fillBufList);
            // if interrupted in the process of the write call, we must handle the errors appropriately
            if (error) {
                if (kExtAudioFileError_CodecUnavailableInputConsumed == error) {
                
                    printf("ExtAudioFileWrite kExtAudioFileError_CodecUnavailableInputConsumed error %ld\n", error);
                    
                    /*
                        Returned when ExtAudioFileWrite was interrupted. You must stop calling
                        ExtAudioFileWrite. If the underlying audio converter can resume after an
                        interruption (see kAudioConverterPropertyCanResumeFromInterruption), you must
                        wait for an EndInterruption notification from AudioSession, then activate the session
                        before resuming. In this situation, the buffer you provided to ExtAudioFileWrite was successfully
                        consumed and you may proceed to the next buffer
                    */
                    
                } else if (kExtAudioFileError_CodecUnavailableInputNotConsumed == error) {
                
                    printf("ExtAudioFileWrite kExtAudioFileError_CodecUnavailableInputNotConsumed error %ld\n", error);
                    
                    /*
                        Returned when ExtAudioFileWrite was interrupted. You must stop calling
                        ExtAudioFileWrite. If the underlying audio converter can resume after an
                        interruption (see kAudioConverterPropertyCanResumeFromInterruption), you must
                        wait for an EndInterruption notification from AudioSession, then activate the session
                        before resuming. In this situation, the buffer you provided to ExtAudioFileWrite was not
                        successfully consumed and you must try to write it again
                    */
                    
                    // seek back to last offset before last read so we can try again after the interruption
                    sourceFrameOffset -= numFrames;
                    XThrowIfError(ExtAudioFileSeek(sourceFile, sourceFrameOffset), "ExtAudioFileSeek failed!");
                    
                } else {
                    XThrowIfError(error, "ExtAudioFileWrite error!");
                }
            } // if
        } // while
	}
    catch (CAXException e) {
		char buf[256];
		fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
        error = e.mError;
	}
    
    // close
    if (destinationFile) ExtAudioFileDispose(destinationFile);
    if (sourceFile) ExtAudioFileDispose(sourceFile);

    // transition thread state to kStateDone before continuing
    ThreadStateSetDone();
    
    return error;
}
Example #25
OSStatus DoConvertFile(CFURLRef sourceURL, CFURLRef destinationURL, OSType outputFormat, Float64 outputSampleRate, UInt32 outputBitRate)
{
	AudioFileID         sourceFileID = 0;
    AudioFileID         destinationFileID = 0;
    AudioConverterRef   converter = NULL;
    Boolean             canResumeFromInterruption = true; // we can continue unless told otherwise
    
    CAStreamBasicDescription srcFormat, dstFormat;
    AudioFileIO afio = {};
    
    char                         *outputBuffer = NULL;
    AudioStreamPacketDescription *outputPacketDescriptions = NULL;
    
    OSStatus error = noErr;
    
    // in this sample we should never be on the main thread here
    assert(![NSThread isMainThread]);
    
    // transition thread state to kStateRunning before continuing
    ThreadStateSetRunning();
    
    printf("\nDoConvertFile\n");
    
    try {
        // get the source file
        XThrowIfError(AudioFileOpenURL(sourceURL, kAudioFileReadPermission, 0, &sourceFileID), "AudioFileOpenURL failed");
	
        // get the source data format
        UInt32 size = sizeof(srcFormat);
        XThrowIfError(AudioFileGetProperty(sourceFileID, kAudioFilePropertyDataFormat, &size, &srcFormat), "couldn't get source data format");
        
        // setup the output file format
        dstFormat.mSampleRate = (outputSampleRate == 0 ? srcFormat.mSampleRate : outputSampleRate); // set sample rate
        if (outputFormat == kAudioFormatLinearPCM) {
            // if the output format is PCM, create a 16-bit int PCM file format description as an example
            dstFormat.mFormatID = outputFormat;
            dstFormat.mChannelsPerFrame = srcFormat.NumberChannels();
            dstFormat.mBitsPerChannel = 16;
            dstFormat.mBytesPerPacket = dstFormat.mBytesPerFrame = 2 * dstFormat.mChannelsPerFrame;
            dstFormat.mFramesPerPacket = 1;
            dstFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger; // little-endian
        } else {
            // compressed format - need to set at least format, sample rate and channel fields for kAudioFormatProperty_FormatInfo
            dstFormat.mFormatID = outputFormat;
            dstFormat.mChannelsPerFrame =  (outputFormat == kAudioFormatiLBC ? 1 : srcFormat.NumberChannels()); // for iLBC num channels must be 1
            
            // use AudioFormat API to fill out the rest of the description
            size = sizeof(dstFormat);
            XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &size, &dstFormat), "couldn't create destination data format");
        }
        
        printf("Source File format: "); srcFormat.Print();
        printf("Destination format: "); dstFormat.Print();
	
        // create the AudioConverter
        
        XThrowIfError(AudioConverterNew(&srcFormat, &dstFormat, &converter), "AudioConverterNew failed!");
    
        // if the source has a cookie, get it and set it on the Audio Converter
        ReadCookie(sourceFileID, converter);

        // get the actual formats back from the Audio Converter
        size = sizeof(srcFormat);
        XThrowIfError(AudioConverterGetProperty(converter, kAudioConverterCurrentInputStreamDescription, &size, &srcFormat), "AudioConverterGetProperty kAudioConverterCurrentInputStreamDescription failed!");

        size = sizeof(dstFormat);
        XThrowIfError(AudioConverterGetProperty(converter, kAudioConverterCurrentOutputStreamDescription, &size, &dstFormat), "AudioConverterGetProperty kAudioConverterCurrentOutputStreamDescription failed!");

        printf("Formats returned from AudioConverter:\n");
        printf("              Source format: "); srcFormat.Print();
        printf("    Destination File format: "); dstFormat.Print();
        
        // if encoding to AAC set the bitrate
        // kAudioConverterEncodeBitRate is a UInt32 value containing the number of bits per second to aim for when encoding data
        // when you explicitly set the bit rate and the sample rate, this tells the encoder to stick with both bit rate and sample rate
        //     but there are combinations (also depending on the number of channels) which will not be allowed
        // if you do not explicitly set a bit rate the encoder will pick the correct value for you depending on samplerate and number of channels
        // bit rate also scales with the number of channels, therefore one bit rate per sample rate can be used for mono cases
        //    and if you have stereo or more, you can multiply that number by the number of channels.
        
        if (outputBitRate == 0) {
            outputBitRate = 192000; // 192kbs
        }
        
        if (dstFormat.mFormatID == kAudioFormatMPEG4AAC) {
            
            UInt32 propSize = sizeof(outputBitRate);
            
            // set the bit rate depending on the samplerate chosen
            XThrowIfError(AudioConverterSetProperty(converter, kAudioConverterEncodeBitRate, propSize, &outputBitRate),
                           "AudioConverterSetProperty kAudioConverterEncodeBitRate failed!");
            
            // get it back and print it out
            AudioConverterGetProperty(converter, kAudioConverterEncodeBitRate, &propSize, &outputBitRate);
            printf ("AAC Encode Bitrate: %u\n", (unsigned int)outputBitRate);
        }

        // can the Audio Converter resume conversion after an interruption?
        // this property may be queried at any time after construction of the Audio Converter after setting its output format
        // there's no clear reason to prefer construction time, interruption time, or potential resumption time but we prefer
        // construction time since it means less code to execute during or after interruption time
        UInt32 canResume = 0;
        size = sizeof(canResume);
        error = AudioConverterGetProperty(converter, kAudioConverterPropertyCanResumeFromInterruption, &size, &canResume);
        if (noErr == error) {
            // we received a valid return value from the GetProperty call
            // if the property's value is 1, then the codec CAN resume work following an interruption
            // if the property's value is 0, then interruptions destroy the codec's state and we're done
            
            if (0 == canResume) canResumeFromInterruption = false;
            
            printf("Audio Converter %s continue after interruption!\n", (canResumeFromInterruption == 0 ? "CANNOT" : "CAN"));
        } else {
            // if the property is unimplemented (kAudioConverterErr_PropertyNotSupported, or paramErr returned in the case of PCM),
            // then the codec being used is not a hardware codec so we're not concerned about codec state
            // we are always going to be able to resume conversion after an interruption
            
            if (kAudioConverterErr_PropertyNotSupported == error) {
                printf("kAudioConverterPropertyCanResumeFromInterruption property not supported - see comments in source for more info.\n");
            } else {
                printf("AudioConverterGetProperty kAudioConverterPropertyCanResumeFromInterruption result %ld, paramErr is OK if PCM\n", error);
            }
            
            error = noErr;
        }
        
        // create the destination file 
        XThrowIfError(AudioFileCreateWithURL(destinationURL, kAudioFileCAFType, &dstFormat, kAudioFileFlags_EraseFile, &destinationFileID), "AudioFileCreateWithURL failed!");

        // set up source buffers and data proc info struct
        afio.srcFileID = sourceFileID;
        afio.srcBufferSize = 32768;
        afio.srcBuffer = new char [afio.srcBufferSize];
        afio.srcFilePos = 0;
        afio.srcFormat = srcFormat;
		
        if (srcFormat.mBytesPerPacket == 0) {
            // if the source format is VBR, we need to get the maximum packet size
            // use kAudioFilePropertyPacketSizeUpperBound which returns the theoretical maximum packet size
            // in the file (without actually scanning the whole file to find the largest packet,
            // as may happen with kAudioFilePropertyMaximumPacketSize)
            size = sizeof(afio.srcSizePerPacket);
            XThrowIfError(AudioFileGetProperty(sourceFileID, kAudioFilePropertyPacketSizeUpperBound, &size, &afio.srcSizePerPacket), "AudioFileGetProperty kAudioFilePropertyPacketSizeUpperBound failed!");
            
            // how many packets can we read for our buffer size?
            afio.numPacketsPerRead = afio.srcBufferSize / afio.srcSizePerPacket;
            
            // allocate memory for the PacketDescription structures describing the layout of each packet
            afio.packetDescriptions = new AudioStreamPacketDescription [afio.numPacketsPerRead];
        } else {
            // CBR source format
            afio.srcSizePerPacket = srcFormat.mBytesPerPacket;
            afio.numPacketsPerRead = afio.srcBufferSize / afio.srcSizePerPacket;
            afio.packetDescriptions = NULL;
        }

        // set up output buffers
        UInt32 outputSizePerPacket = dstFormat.mBytesPerPacket; // this will be non-zero if the format is CBR
        UInt32 theOutputBufSize = 32768;
        outputBuffer = new char[theOutputBufSize];
        
        if (outputSizePerPacket == 0) {
            // if the destination format is VBR, we need to get max size per packet from the converter
            size = sizeof(outputSizePerPacket);
            XThrowIfError(AudioConverterGetProperty(converter, kAudioConverterPropertyMaximumOutputPacketSize, &size, &outputSizePerPacket), "AudioConverterGetProperty kAudioConverterPropertyMaximumOutputPacketSize failed!");
            
            // allocate memory for the PacketDescription structures describing the layout of each packet
            outputPacketDescriptions = new AudioStreamPacketDescription [theOutputBufSize / outputSizePerPacket];
        }
        UInt32 numOutputPackets = theOutputBufSize / outputSizePerPacket;

        // if the destination format has a cookie, get it and set it on the output file
        WriteCookie(converter, destinationFileID);

        // write destination channel layout
        if (srcFormat.mChannelsPerFrame > 2) {
            WriteDestinationChannelLayout(converter, sourceFileID, destinationFileID);
        }

        UInt64 totalOutputFrames = 0; // used for debugging printf
        SInt64 outputFilePos = 0;
        
        // loop to convert data
        printf("Converting...\n");
        while (1) {

            // set up output buffer list
            AudioBufferList fillBufList;
            fillBufList.mNumberBuffers = 1;
            fillBufList.mBuffers[0].mNumberChannels = dstFormat.mChannelsPerFrame;
            fillBufList.mBuffers[0].mDataByteSize = theOutputBufSize;
            fillBufList.mBuffers[0].mData = outputBuffer;
            
            // the pause check is stubbed out in this variant; a real app would block here while interrupted
            Boolean wasInterrupted = NO;
            
            if ((error || wasInterrupted) && (false == canResumeFromInterruption)) {
                // this is our interruption termination condition
                // an interruption has occurred but the Audio Converter cannot continue
                error = kMyAudioConverterErr_CannotResumeFromInterruptionError;
                break;
            }

            // convert data
            UInt32 ioOutputDataPackets = numOutputPackets;
            printf("AudioConverterFillComplexBuffer...\n");
            error = AudioConverterFillComplexBuffer(converter, EncoderDataProc, &afio, &ioOutputDataPackets, &fillBufList, outputPacketDescriptions);
            // if interrupted in the process of the conversion call, we must handle the error appropriately
            if (error) {
                if (kAudioConverterErr_HardwareInUse == error) {
                     printf("Audio Converter returned kAudioConverterErr_HardwareInUse!\n");
                } else {
                    XThrowIfError(error, "AudioConverterFillComplexBuffer error!");
                }
            } else {
                if (ioOutputDataPackets == 0) {
                    // this is the EOF condition
                    error = noErr;
                    break;
                }
            }
            
            if (noErr == error) {
                // write to output file
                UInt32 inNumBytes = fillBufList.mBuffers[0].mDataByteSize;
                XThrowIfError(AudioFileWritePackets(destinationFileID, false, inNumBytes, outputPacketDescriptions, outputFilePos, &ioOutputDataPackets, outputBuffer), "AudioFileWritePackets failed!");
            
                printf("Convert Output: Write %lu packets at position %lld, size: %ld\n", ioOutputDataPackets, outputFilePos, inNumBytes);
                
                // advance output file packet position
                outputFilePos += ioOutputDataPackets;

                if (dstFormat.mFramesPerPacket) { 
                    // the format has constant frames per packet
                    totalOutputFrames += (ioOutputDataPackets * dstFormat.mFramesPerPacket);
                } else if (outputPacketDescriptions != NULL) {
                    // variable frames per packet require doing this for each packet (adding up the number of sample frames of data in each packet)
                    for (UInt32 i = 0; i < ioOutputDataPackets; ++i)
                        totalOutputFrames += outputPacketDescriptions[i].mVariableFramesInPacket;
                }
            }
        } // while

        if (noErr == error) {
            // write out any of the leading and trailing frames for compressed formats only
            if (dstFormat.mBitsPerChannel == 0) {
                // our output frame count should agree with the packet table info written below
                printf("Total number of output frames counted: %lld\n", totalOutputFrames); 
                WritePacketTableInfo(converter, destinationFileID);
            }
        
            // write the cookie again - sometimes codecs will update cookies at the end of a conversion
            WriteCookie(converter, destinationFileID);
        }
    }
    catch (CAXException e) {
		char buf[256];
		fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
        error = e.mError;
	}
    
    // cleanup
    if (converter) AudioConverterDispose(converter);
    if (destinationFileID) AudioFileClose(destinationFileID);
	if (sourceFileID) AudioFileClose(sourceFileID);
    
    if (afio.srcBuffer) delete [] afio.srcBuffer;
    if (afio.packetDescriptions) delete [] afio.packetDescriptions;
    if (outputBuffer) delete [] outputBuffer;
    if (outputPacketDescriptions) delete [] outputPacketDescriptions;
    
    // transition thread state to kStateDone before continuing
    ThreadStateSetDone();
    
    return error;
}
Example #26
JNIEXPORT jint JNICALL Java_com_apple_audio_toolbox_AudioConverter_AudioConverterGetProperty
  (JNIEnv *, jclass, jint inAudioConverter, jint inPropertyID, jint ioPropertyDataSize, jint outPropertyData)
{
	return (jint)AudioConverterGetProperty((AudioConverterRef)inAudioConverter, (AudioConverterPropertyID)inPropertyID, (UInt32 *)ioPropertyDataSize, (void *)outPropertyData);
}
Example #27
static GVBool gviHardwareInitPlayback(GVIDevice * device)
{
	GVIHardwareData * data = (GVIHardwareData *)device->m_data;
	UInt32 size;
	OSStatus result;
	UInt32 primeMethod;
	SInt32 channelMap[100];
	int i;

	// create the array of sources
	data->m_playbackSources = gviNewSourceList();
	if(!data->m_playbackSources)
		return GVFalse;

	// get the playback format
	size = sizeof(AudioStreamBasicDescription);
	result = AudioDeviceGetProperty(device->m_deviceID, 0, false, kAudioDevicePropertyStreamFormat, &size, &data->m_playbackStreamDescriptor);
	if(result != noErr)
	{
		gviFreeSourceList(data->m_playbackSources);
		return GVFalse;
	}

	// create a converter from the GV format to the playback format
	result = AudioConverterNew(&GVIVoiceFormat, &data->m_playbackStreamDescriptor, &data->m_playbackConverter);
	if(result != noErr)
	{
		gviFreeSourceList(data->m_playbackSources);
		return GVFalse;
	}

	// set it to do no priming
	primeMethod = kConverterPrimeMethod_None;
	result = AudioConverterSetProperty(data->m_playbackConverter, kAudioConverterPrimeMethod, sizeof(UInt32), &primeMethod);
	if(result != noErr)
	{
		AudioConverterDispose(data->m_playbackConverter);
		gviFreeSourceList(data->m_playbackSources);
		return GVFalse;
	}

	// setup the converter to map the input channel to all output channels
	result = AudioConverterGetPropertyInfo(data->m_playbackConverter, kAudioConverterChannelMap, &size, NULL);
	if(result == noErr)
	{
		result = AudioConverterGetProperty(data->m_playbackConverter, kAudioConverterChannelMap, &size, channelMap);
		if(result == noErr)
		{
			for(i = 0 ; i < (size / sizeof(SInt32)) ; i++)
				channelMap[i] = 0;

			AudioConverterSetProperty(data->m_playbackConverter, kAudioConverterChannelMap, size, channelMap);
		}
	}

	// allocate the playback buffer
	data->m_playbackBuffer = (GVSample *)gsimalloc(GVIBytesPerFrame);
	if(!data->m_playbackBuffer)
	{
		AudioConverterDispose(data->m_playbackConverter);
		gviFreeSourceList(data->m_playbackSources);
		return GVFalse;
	}

	// add property listener
	AudioDeviceAddPropertyListener(device->m_deviceID, 0, false, kAudioDevicePropertyDeviceIsAlive, gviPropertyListener, device);

#if GVI_VOLUME_IN_SOFTWARE
	// init volume
	data->m_playbackVolume = (GVScalar)1.0;
#endif

	return GVTrue;
}
Example #28
    AACEncode::AACEncode(int frequencyInHz, int channelCount, int bitrate)
    : m_sentConfig(false), m_bitrate(bitrate)
    {
        
        OSStatus result = 0;
        
        AudioStreamBasicDescription in = {0}, out = {0};
        
        
        // passing anything except 48000, 44100, and 22050 for mSampleRate results in "!dat"
        // OSStatus when querying for kAudioConverterPropertyMaximumOutputPacketSize property
        // below
        in.mSampleRate = frequencyInHz;
        // passing anything except 2 for mChannelsPerFrame results in "!dat" OSStatus when
        // querying for kAudioConverterPropertyMaximumOutputPacketSize property below
        in.mChannelsPerFrame = channelCount;
        in.mBitsPerChannel = 16;
        in.mFormatFlags =  kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
        in.mFormatID = kAudioFormatLinearPCM;
        in.mFramesPerPacket = 1;
        in.mBytesPerFrame = in.mBitsPerChannel * in.mChannelsPerFrame / 8;
        in.mBytesPerPacket = in.mFramesPerPacket*in.mBytesPerFrame;
        
        m_in = in;
        
        out.mFormatID = kAudioFormatMPEG4AAC;
        out.mFormatFlags = 0;
        out.mFramesPerPacket = kSamplesPerFrame;
        out.mSampleRate = frequencyInHz;
        out.mChannelsPerFrame = channelCount;
        

        m_out = out;
        
        UInt32 outputBitrate = bitrate;
        UInt32 propSize = sizeof(outputBitrate);
        UInt32 outputPacketSize = 0;

        const OSType subtype = kAudioFormatMPEG4AAC;
        AudioClassDescription requestedCodecs[2] = {
            {
                kAudioEncoderComponentType,
                subtype,
                kAppleSoftwareAudioCodecManufacturer
            },
            {
                kAudioEncoderComponentType,
                subtype,
                kAppleHardwareAudioCodecManufacturer
            }
        };
        
        result = AudioConverterNewSpecific(&in, &out, 2, requestedCodecs, &m_audioConverter);

        
        if(result == noErr) {
        
            result = AudioConverterSetProperty(m_audioConverter, kAudioConverterEncodeBitRate, propSize, &outputBitrate);

        }
        if(result == noErr) {
            result = AudioConverterGetProperty(m_audioConverter, kAudioConverterPropertyMaximumOutputPacketSize, &propSize, &outputPacketSize);
        }
        
        if(result == noErr) {
            m_outputPacketMaxSize = outputPacketSize;
            
            m_bytesPerSample = 2 * channelCount;
            
            uint8_t sampleRateIndex = 0;
            switch(frequencyInHz) {
                case 96000:
                    sampleRateIndex = 0;
                    break;
                case 88200:
                    sampleRateIndex = 1;
                    break;
                case 64000:
                    sampleRateIndex = 2;
                    break;
                case 48000:
                    sampleRateIndex = 3;
                    break;
                case 44100:
                    sampleRateIndex = 4;
                    break;
                case 32000:
                    sampleRateIndex = 5;
                    break;
                case 24000:
                    sampleRateIndex = 6;
                    break;
                case 22050:
                    sampleRateIndex = 7;
                    break;
                case 16000:
                    sampleRateIndex = 8;
                    break;
                case 12000:
                    sampleRateIndex = 9;
                    break;
                case 11025:
                    sampleRateIndex = 10;
                    break;
                case 8000:
                    sampleRateIndex = 11;
                    break;
                case 7350:
                    sampleRateIndex = 12;
                    break;
                default:
                    sampleRateIndex = 15;
            }
            makeAsc(sampleRateIndex, uint8_t(channelCount));
        } else {
            DLog("Error setting up audio encoder %x", (int)result);
        }
    }
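The switch above computes the MPEG-4 samplingFrequencyIndex (15 means the sample rate must be written explicitly), which makeAsc() packs into the AudioSpecificConfig sent to the muxer. makeAsc() is outside this excerpt; a plausible sketch, assuming AAC-LC (audio object type 2) and a two-byte member m_asc[2]:

    // Hypothetical sketch of makeAsc(): the standard two-byte MPEG-4
    // AudioSpecificConfig is 5 bits audio object type (2 = AAC-LC), 4 bits
    // samplingFrequencyIndex, 4 bits channelConfiguration, then three zero
    // bits (frameLengthFlag, dependsOnCoreCoder, extensionFlag).
    void AACEncode::makeAsc(uint8_t sampleRateIndex, uint8_t channelCount)
    {
        m_asc[0] = uint8_t((2 << 3) | (sampleRateIndex >> 1));
        m_asc[1] = uint8_t(((sampleRateIndex & 0x1) << 7) | ((channelCount & 0xF) << 3));
    }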
Example #29
// soundsource overrides
Result SoundSourceCoreAudio::tryOpen(const AudioSourceConfig& audioSrcCfg) {
    const QString fileName(getLocalFileName());

    //Open the audio file.
    OSStatus err;

    /** This code block works with OS X 10.5+ only. DO NOT DELETE IT for now. */
    CFStringRef urlStr = CFStringCreateWithCharacters(0,
            reinterpret_cast<const UniChar *>(fileName.unicode()),
            fileName.size());
    CFURLRef urlRef = CFURLCreateWithFileSystemPath(NULL, urlStr,
            kCFURLPOSIXPathStyle, false);
    err = ExtAudioFileOpenURL(urlRef, &m_audioFile);
    CFRelease(urlStr);
    CFRelease(urlRef);

    /** TODO: Use FSRef for compatibility with 10.4 Tiger.
     Note that ExtAudioFileOpen() is deprecated above Tiger, so we must maintain
     both code paths if someone finishes this part of the code.
     FSRef fsRef;
     CFURLGetFSRef(reinterpret_cast<CFURLRef>(url.get()), &fsRef);
     err = ExtAudioFileOpen(&fsRef, &m_audioFile);
     */

    if (err != noErr) {
        qDebug() << "SSCA: Error opening file " << fileName;
        return ERR;
    }

    // get the input file format
    UInt32 inputFormatSize = sizeof(m_inputFormat);
    err = ExtAudioFileGetProperty(m_audioFile,
            kExtAudioFileProperty_FileDataFormat, &inputFormatSize,
            &m_inputFormat);
    if (err != noErr) {
        qDebug() << "SSCA: Error getting file format (" << fileName << ")";
        return ERR;
    }
    m_bFileIsMp3 = m_inputFormat.mFormatID == kAudioFormatMPEGLayer3;

    // create the output format
    const UInt32 numChannels =
            (kChannelCountZero < audioSrcCfg.channelCountHint) ? audioSrcCfg.channelCountHint : 2;
    m_outputFormat = CAStreamBasicDescription(m_inputFormat.mSampleRate,
            numChannels, CAStreamBasicDescription::kPCMFormatFloat32, true);

    // set the client format
    err = ExtAudioFileSetProperty(m_audioFile,
            kExtAudioFileProperty_ClientDataFormat, sizeof(m_outputFormat),
            &m_outputFormat);
    if (err != noErr) {
        qDebug() << "SSCA: Error setting file property";
        return ERR;
    }

    //get the total length in frames of the audio file - copypasta: http://discussions.apple.com/thread.jspa?threadID=2364583&tstart=47
    SInt64 totalFrameCount;
    UInt32 totalFrameCountSize = sizeof(totalFrameCount);
    err = ExtAudioFileGetProperty(m_audioFile,
            kExtAudioFileProperty_FileLengthFrames, &totalFrameCountSize,
            &totalFrameCount);
    if (err != noErr) {
        qDebug() << "SSCA: Error getting number of frames";
        return ERR;
    }

    //
    // WORKAROUND for bug in ExtAudioFile
    //

    AudioConverterRef acRef;
    UInt32 acrsize = sizeof(AudioConverterRef);
    err = ExtAudioFileGetProperty(m_audioFile,
            kExtAudioFileProperty_AudioConverter, &acrsize, &acRef);
    //_ThrowExceptionIfErr(@"kExtAudioFileProperty_AudioConverter", err);

    AudioConverterPrimeInfo primeInfo;
    UInt32 piSize = sizeof(AudioConverterPrimeInfo);
    memset(&primeInfo, 0, piSize);
    err = AudioConverterGetProperty(acRef, kAudioConverterPrimeInfo, &piSize,
            &primeInfo);
    if (err != kAudioConverterErr_PropertyNotSupported) { // Only if decompressing
        //_ThrowExceptionIfErr(@"kAudioConverterPrimeInfo", err);
        m_headerFrames = primeInfo.leadingFrames;
    } else {
        m_headerFrames = 0;
    }

    setChannelCount(m_outputFormat.NumberChannels());
    setFrameRate(m_inputFormat.mSampleRate);
    // NOTE(uklotzde): This is what I found when migrating
    // the code from SoundSource (sample-oriented) to the new
    // AudioSource (frame-oriented) API. It is not documented
    // when m_headerFrames > 0 and what the consequences are.
    setFrameCount(totalFrameCount/* - m_headerFrames*/);

    //Seek to position 0, which forces us to skip over all the header frames.
    //This makes sure we're ready to just let the Analyser rip and it'll
    //get the number of samples it expects (i.e. no header frames).
    seekSampleFrame(0);

    return OK;
}
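The call to seekSampleFrame(0) above is where m_headerFrames does its work. The method itself is outside this excerpt; a minimal sketch of the idea (return type and error handling simplified relative to the real code):

// Sketch: every seek is offset by m_headerFrames, so frame 0 as seen by
// callers is the first real audio frame after the decoder's priming frames.
SInt64 SoundSourceCoreAudio::seekSampleFrame(SInt64 frameIndex) {
    OSStatus err = ExtAudioFileSeek(m_audioFile, m_headerFrames + frameIndex);
    if (err != noErr) {
        qDebug() << "SSCA: Error seeking to frame" << frameIndex;
    }
    return frameIndex;
}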
Example #30
/***********************************************************************
 * hb_work_encCoreAudio_init
 ***********************************************************************
 *
 **********************************************************************/
int encCoreAudioInit(hb_work_object_t *w, hb_job_t *job, enum AAC_MODE mode)
{
    hb_work_private_t *pv = calloc(1, sizeof(hb_work_private_t));
    hb_audio_t *audio = w->audio;
    AudioStreamBasicDescription input, output;
    UInt32 tmp, tmpsiz = sizeof(tmp);
    OSStatus err;

    w->private_data = pv;
    pv->job = job;

    // pass the number of channels used into the private work data
    pv->nchannels =
        hb_mixdown_get_discrete_channel_count(audio->config.out.mixdown);

    bzero(&input, sizeof(AudioStreamBasicDescription));
    input.mSampleRate = (Float64)audio->config.out.samplerate;
    input.mFormatID = kAudioFormatLinearPCM;
    input.mFormatFlags = (kLinearPCMFormatFlagIsFloat|kAudioFormatFlagsNativeEndian);
    input.mBytesPerPacket = 4 * pv->nchannels;
    input.mFramesPerPacket = 1;
    input.mBytesPerFrame = input.mBytesPerPacket * input.mFramesPerPacket;
    input.mChannelsPerFrame = pv->nchannels;
    input.mBitsPerChannel = 32;

    bzero(&output, sizeof(AudioStreamBasicDescription));
    switch (mode)
    {
        case AAC_MODE_HE:
            output.mFormatID = kAudioFormatMPEG4AAC_HE;
            break;
        case AAC_MODE_LC:
        default:
            output.mFormatID = kAudioFormatMPEG4AAC;
            break;
    }
    output.mSampleRate = (Float64)audio->config.out.samplerate;
    output.mChannelsPerFrame = pv->nchannels;
    // let CoreAudio decide the rest

    // initialise encoder
    err = AudioConverterNew(&input, &output, &pv->converter);
    if (err != noErr)
    {
        // Retry without the samplerate
        bzero(&output, sizeof(AudioStreamBasicDescription));
        switch (mode)
        {
            case AAC_MODE_HE:
                output.mFormatID = kAudioFormatMPEG4AAC_HE;
                break;
            case AAC_MODE_LC:
            default:
                output.mFormatID = kAudioFormatMPEG4AAC;
                break;
        }
        output.mChannelsPerFrame = pv->nchannels;

        err = AudioConverterNew(&input, &output, &pv->converter);

        if (err != noErr)
        {
            hb_log("Error creating an AudioConverter err=%"PRId64" output.mBytesPerFrame=%"PRIu64"",
                   (int64_t)err, (uint64_t)output.mBytesPerFrame);
            *job->done_error = HB_ERROR_UNKNOWN;
            *job->die = 1;
            return -1;
        }
    }

    // set encoder quality to maximum
    tmp = kAudioConverterQuality_Max;
    AudioConverterSetProperty(pv->converter, kAudioConverterCodecQuality,
                              sizeof(tmp), &tmp);

    if (audio->config.out.bitrate > 0)
    {
        // set encoder bitrate control mode to constrained variable
        tmp = kAudioCodecBitRateControlMode_VariableConstrained;
        AudioConverterSetProperty(pv->converter,
                                  kAudioCodecPropertyBitRateControlMode,
                                  sizeof(tmp), &tmp);

        // get available bitrates
        AudioValueRange *bitrates;
        ssize_t bitrateCounts;
        err = AudioConverterGetPropertyInfo(pv->converter,
                                            kAudioConverterApplicableEncodeBitRates,
                                            &tmpsiz, NULL);
        bitrates = malloc(tmpsiz);
        err = AudioConverterGetProperty(pv->converter,
                                        kAudioConverterApplicableEncodeBitRates,
                                        &tmpsiz, bitrates);
        bitrateCounts = tmpsiz / sizeof(AudioValueRange);

        // clamp the requested bitrate to the ends of the applicable range
        // (the code assumes each AudioValueRange holds a single value,
        // hence only mMinimum is checked)
        tmp = audio->config.out.bitrate * 1000;
        if (tmp < bitrates[0].mMinimum)
            tmp = bitrates[0].mMinimum;
        if (tmp > bitrates[bitrateCounts-1].mMinimum)
            tmp = bitrates[bitrateCounts-1].mMinimum;
        free(bitrates);
        if (tmp != audio->config.out.bitrate * 1000)
        {
            hb_log("encCoreAudioInit: sanitizing track %d audio bitrate %d to %"PRIu32"",
                   audio->config.out.track, audio->config.out.bitrate, tmp / 1000);
        }
        AudioConverterSetProperty(pv->converter,
                                  kAudioConverterEncodeBitRate,
                                  sizeof(tmp), &tmp);
    }
    else if (audio->config.out.quality >= 0)
    {
        if (mode != AAC_MODE_LC)
        {
            hb_error("encCoreAudioInit: internal error, VBR set but not applicable");
            return 1;
        }
        // set encoder bitrate control mode to variable
        tmp = kAudioCodecBitRateControlMode_Variable;
        AudioConverterSetProperty(pv->converter,
                                  kAudioCodecPropertyBitRateControlMode,
                                  sizeof(tmp), &tmp);

        // set quality
        tmp = audio->config.out.quality;
        AudioConverterSetProperty(pv->converter,
                                  kAudioCodecPropertySoundQualityForVBR,
                                  sizeof(tmp), &tmp);
    }
    else
    {
        hb_error("encCoreAudioInit: internal error, bitrate/quality not set");
        return 1;
    }

    // get real input
    tmpsiz = sizeof(input);
    AudioConverterGetProperty(pv->converter,
                              kAudioConverterCurrentInputStreamDescription,
                              &tmpsiz, &input);
    // get real output
    tmpsiz = sizeof(output);
    AudioConverterGetProperty(pv->converter,
                              kAudioConverterCurrentOutputStreamDescription,
                              &tmpsiz, &output);

    // set sizes
    pv->isamplesiz  = input.mBytesPerPacket;
    pv->isamples    = output.mFramesPerPacket;
    pv->osamplerate = output.mSampleRate;
    audio->config.out.samples_per_frame = pv->isamples;

    // channel remapping
    pv->remap = hb_audio_remap_init(AV_SAMPLE_FMT_FLT, &hb_aac_chan_map,
                                    audio->config.in.channel_map);
    if (pv->remap == NULL)
    {
        hb_error("encCoreAudioInit: hb_audio_remap_init() failed");
    }
    uint64_t layout = hb_ff_mixdown_xlat(audio->config.out.mixdown, NULL);
    hb_audio_remap_set_channel_layout(pv->remap, layout);

    // get maximum output size
    AudioConverterGetProperty(pv->converter,
                              kAudioConverterPropertyMaximumOutputPacketSize,
                              &tmpsiz, &tmp);
    pv->omaxpacket = tmp;

    // get magic cookie (elementary stream descriptor)
    tmp = HB_CONFIG_MAX_SIZE;
    AudioConverterGetProperty(pv->converter,
                              kAudioConverterCompressionMagicCookie,
                              &tmp, w->config->extradata.bytes);
    // CoreAudio returns a complete ESDS, but we only need
    // the DecoderSpecific info.
    UInt8* buffer = NULL;
    ReadESDSDescExt(w->config->extradata.bytes, &buffer, &tmpsiz, 0);
    w->config->extradata.length = tmpsiz;
    memmove(w->config->extradata.bytes, buffer, w->config->extradata.length);
    free(buffer);

    pv->list = hb_list_init();
    pv->buf = NULL;

    return 0;
}
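Everything encCoreAudioInit() prepares (isamplesiz, isamples, omaxpacket) feeds the per-frame encode step, which is not shown above. A condensed sketch of how the converter would be driven; the callback and the staging fields pv->ibuf / pv->ibuf_bytes are hypothetical, and HandBrake's real encoder additionally handles buffering, draining, and flushing:

// Input callback: hand the converter whatever PCM has been staged. A real
// implementation must set *npackets = 0 and return an error once it runs dry.
static OSStatus inInputDataProc(AudioConverterRef converter, UInt32 *npackets,
                                AudioBufferList *buffers,
                                AudioStreamPacketDescription **ignored,
                                void *userdata)
{
    hb_work_private_t *pv = userdata;
    buffers->mBuffers[0].mData           = pv->ibuf;       // staged float PCM
    buffers->mBuffers[0].mDataByteSize   = pv->ibuf_bytes;
    buffers->mBuffers[0].mNumberChannels = pv->nchannels;
    *npackets = pv->ibuf_bytes / pv->isamplesiz;
    return noErr;
}

// Produce one compressed packet of at most pv->omaxpacket bytes.
static hb_buffer_t *encode_one(hb_work_private_t *pv)
{
    UInt32 npackets = 1;
    AudioStreamPacketDescription odesc = { 0 };
    hb_buffer_t *obuf = hb_buffer_init(pv->omaxpacket);
    AudioBufferList obuflist = {
        .mNumberBuffers = 1,
        .mBuffers = { { .mNumberChannels = pv->nchannels,
                        .mDataByteSize   = pv->omaxpacket,
                        .mData           = obuf->data } },
    };

    OSStatus err = AudioConverterFillComplexBuffer(pv->converter, inInputDataProc,
                                                   pv, &npackets, &obuflist, &odesc);
    if (err != noErr || npackets == 0)
    {
        hb_buffer_close(&obuf);
        return NULL;
    }
    obuf->size = odesc.mDataByteSize;
    return obuf;
}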