int main(int argc, const char *argv[]) { int nins = 0; static const int MAX_INPUT_FILES = 32; const char *infiles[MAX_INPUT_FILES]; const char *outfile = NULL; UInt32 layoutTag = 0; #if TARGET_OS_WIN32 InitializeQTML(0L); #endif for (int i = 1; i < argc; ++i) { const char *arg = argv[i]; if (arg[0] != '-') { if (nins >= MAX_INPUT_FILES) { fprintf(stderr, "too many input files\n\n"); usage(); } infiles[nins++] = arg; } else { arg += 1; if (arg[0] == 'o') { if (++i == argc) MissingArgument(); outfile = argv[i]; } else if (arg[0] == 'l' || !strcmp(arg, "-channellayout")) { if (++i == argc) MissingArgument(); layoutTag = CAChannelLayouts::StringToConstant(argv[i]); if (layoutTag == CAChannelLayouts::kInvalidTag) { fprintf(stderr, "unknown channel layout tag: %s\n\n", argv[i]); usage(); } } else { fprintf(stderr, "unknown argument: %s\n\n", arg - 1); usage(); } } } if (nins < 2 || outfile == NULL) usage(); try { if (layoutTag != 0) { CAAudioChannelLayout layout = CAAudioChannelLayout(layoutTag); Interleave(nins, infiles, outfile, &layout); } else Interleave(nins, infiles, outfile, NULL); } catch (CAXException &e) { char buf[256]; fprintf(stderr, "Error: %s (%s)\n", e.mOperation, CAXException::FormatError(buf, e.mError)); return 1; } catch (...) { fprintf(stderr, "An unknown error occurred\n"); return 1; } return 0; }
// Clears any channel layout previously set on the output; the layout is
// only kept on output element 0, so other scopes/elements are rejected.
OSStatus AUPinkNoise::RemoveAudioChannelLayout(AudioUnitScope scope, AudioUnitElement element)
{
	if (scope != kAudioUnitScope_Output)
		return kAudioUnitErr_InvalidScope;
	if (element != 0)
		return kAudioUnitErr_InvalidElement;

	// Revert to a default-constructed (empty) layout.
	mOutputChannelLayout = CAAudioChannelLayout();
	return noErr;
}
// Installs a channel layout on output element 0. Passing NULL removes the
// layout; a non-NULL layout must match the stream format's channel count.
OSStatus AUPinkNoise::SetAudioChannelLayout(	AudioUnitScope			scope,
												AudioUnitElement		element,
												const AudioChannelLayout *inLayout)
{
	if (scope != kAudioUnitScope_Output)
		return kAudioUnitErr_InvalidScope;
	if (element != 0)
		return kAudioUnitErr_InvalidElement;

	if (inLayout == NULL) {
		// NULL means "remove": fall back to the default (empty) layout.
		mOutputChannelLayout = CAAudioChannelLayout();
		return noErr;
	}

	// The proposed layout must agree with the current output stream format.
	const UInt32 layoutChannels = CAAudioChannelLayout::NumberChannels(*inLayout);
	if (GetOutput(element)->GetStreamFormat().NumberChannels() != layoutChannels)
		return kAudioUnitErr_InvalidPropertyValue;

	mOutputChannelLayout = inLayout;
	return noErr;
}
// Fetches the audio unit's channel layout property (variable-size) into
// outLayout. Returns the first CoreAudio error encountered, or noErr.
// FIX: the malloc result was previously unchecked; an allocation failure
// would have passed NULL into AudioUnitGetProperty and then free()d it
// after an undefined write. Now reports kAudio_MemFullErr instead.
OSStatus CAAudioUnit::GetChannelLayout (AudioUnitScope 		inScope,
										AudioUnitElement 	inEl,
										CAAudioChannelLayout	&outLayout) const
{
	// First ask how big the (variable-length) layout property is.
	UInt32 size;
	OSStatus result = AudioUnitGetPropertyInfo (AU(), kAudioUnitProperty_AudioChannelLayout,
									inScope, inEl, &size, NULL);
	if (result) return result;

	AudioChannelLayout *layout = (AudioChannelLayout*)malloc (size);
	if (layout == NULL)
		return kAudio_MemFullErr;	// allocation failed; nothing to free

	require_noerr (result = AudioUnitGetProperty (AU(), kAudioUnitProperty_AudioChannelLayout,
									inScope, inEl, layout, &size), home);

	// CAAudioChannelLayout copies the data, so the malloc'd buffer can be freed below.
	outLayout = CAAudioChannelLayout (layout);

home:
	free (layout);
	return result;
}
void CAAudioFileConverter::ConvertFile(const ConversionParameters &_params) { FSRef destFSRef; UInt32 propertySize; CAStreamBasicDescription destFormat; CAAudioChannelLayout origSrcFileLayout, srcFileLayout, destFileLayout; bool openedSourceFile = false, createdOutputFile = false; mParams = _params; mReadBuffer = NULL; mReadPtrs = NULL; CABufferList *writeBuffer = NULL; CABufferList *writePtrs = NULL; PrepareConversion(); try { if (TaggedDecodingFromCAF()) ReadCAFInfo(); OpenInputFile(); openedSourceFile = true; // get input file's format const CAStreamBasicDescription &srcFormat = mSrcFile.GetFileDataFormat(); if (mParams.flags & kOpt_Verbose) { printf("Input file: %s, %qd frames\n", mParams.input.filePath ? basename(mParams.input.filePath) : "?", mSrcFile.GetNumberFrames()); } mSrcFormat = srcFormat; bool encoding = !destFormat.IsPCM(); bool decoding = !srcFormat.IsPCM(); // prepare output file's format destFormat = mParams.output.dataFormat; if (!encoding && destFormat.mSampleRate == 0.) // on encode, it's OK to have a 0 sample rate; ExtAudioFile will get the SR from the converter and set it on the file. 
// on decode or PCM->PCM, a sample rate of 0 is interpreted as using the source sample rate destFormat.mSampleRate = srcFormat.mSampleRate; // source channel layout srcFileLayout = mSrcFile.GetFileChannelLayout(); origSrcFileLayout = srcFileLayout; if (mParams.input.channelLayoutTag != 0) { XThrowIf(AudioChannelLayoutTag_GetNumberOfChannels(mParams.input.channelLayoutTag) != srcFormat.mChannelsPerFrame, -1, "input channel layout has wrong number of channels for file"); srcFileLayout = CAAudioChannelLayout(mParams.input.channelLayoutTag); mSrcFile.SetFileChannelLayout(srcFileLayout); } // destination channel layout int outChannels = mParams.output.channels; if (mParams.output.channelLayoutTag != 0) { // use the one specified by caller, if any destFileLayout = CAAudioChannelLayout(mParams.output.channelLayoutTag); } else if (srcFileLayout.IsValid()) { // otherwise, assume the same as the source, if any destFileLayout = srcFileLayout; } if (destFileLayout.IsValid()) { // the output channel layout specifies the number of output channels if (outChannels != -1) XThrowIf((unsigned)outChannels != destFileLayout.NumberChannels(), -1, "output channel layout has wrong number of channels"); else outChannels = destFileLayout.NumberChannels(); } if (!(mParams.flags & kOpt_NoSanitizeOutputFormat)) { // adjust the output format's channels; output.channels overrides the channels if (outChannels == -1) outChannels = srcFormat.mChannelsPerFrame; if (outChannels > 0) { destFormat.mChannelsPerFrame = outChannels; destFormat.mBytesPerPacket *= outChannels; destFormat.mBytesPerFrame *= outChannels; } // use AudioFormat API to clean up the output format propertySize = sizeof(AudioStreamBasicDescription); XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &propertySize, &destFormat), "get destination format info"); } OpenOutputFile(srcFormat, destFormat, destFSRef, destFileLayout); createdOutputFile = true; mDestFormat = destFormat; // set up client formats 
CAStreamBasicDescription srcClientFormat, destClientFormat; { CAAudioChannelLayout srcClientLayout, destClientLayout; if (encoding) { if (decoding) { // transcoding // XThrowIf(encoding && decoding, -1, "transcoding not currently supported"); if (srcFormat.mChannelsPerFrame > 2 || destFormat.mChannelsPerFrame > 2) CAXException::Warning("Transcoding multichannel audio may not handle channel layouts correctly", 0); srcClientFormat.SetCanonical(std::min(srcFormat.mChannelsPerFrame, destFormat.mChannelsPerFrame), true); srcClientFormat.mSampleRate = std::max(srcFormat.mSampleRate, destFormat.mSampleRate); mSrcFile.SetClientFormat(srcClientFormat, NULL); destClientFormat = srcClientFormat; } else { // encoding srcClientFormat = srcFormat; destClientFormat = srcFormat; } // by here, destClientFormat will have a valid sample rate destClientLayout = srcFileLayout.IsValid() ? srcFileLayout : destFileLayout; mDestFile.SetClientFormat(destClientFormat, &destClientLayout); } else { // decoding or PCM->PCM if (destFormat.mSampleRate == 0.) 
destFormat.mSampleRate = srcFormat.mSampleRate; destClientFormat = destFormat; srcClientFormat = destFormat; srcClientLayout = destFileLayout; mSrcFile.SetClientFormat(srcClientFormat, &srcClientLayout); } } XThrowIf(srcClientFormat.mBytesPerPacket == 0, -1, "source client format not PCM"); XThrowIf(destClientFormat.mBytesPerPacket == 0, -1, "dest client format not PCM"); if (encoding) { // set the bitrate if (mParams.output.bitRate != -1) { if (mParams.flags & kOpt_Verbose) printf("bitrate = %ld\n", mParams.output.bitRate); mDestFile.SetConverterProperty(kAudioConverterEncodeBitRate, sizeof(UInt32), &mParams.output.bitRate); } // set the codec quality if (mParams.output.codecQuality != -1) { if (mParams.flags & kOpt_Verbose) printf("codec quality = %ld\n", mParams.output.codecQuality); mDestFile.SetConverterProperty(kAudioConverterCodecQuality, sizeof(UInt32), &mParams.output.codecQuality); } // set the bitrate strategy -- called bitrate format in the codecs since it had already shipped if (mParams.output.strategy != -1) { if (mParams.flags & kOpt_Verbose) printf("strategy = %ld\n", mParams.output.strategy); mDestFile.SetConverterProperty(kAudioCodecBitRateFormat, sizeof(UInt32), &mParams.output.strategy); } } // set the SRC quality if (mParams.output.srcQuality != -1) { if (srcFormat.mSampleRate != 0. && destFormat.mSampleRate != 0. 
&& srcFormat.mSampleRate != destFormat.mSampleRate) { if (mParams.flags & kOpt_Verbose) printf("SRC quality = %ld\n", mParams.output.srcQuality); if (encoding) mDestFile.SetConverterProperty(kAudioConverterSampleRateConverterQuality, sizeof(UInt32), &mParams.output.srcQuality); else mSrcFile.SetConverterProperty(kAudioConverterSampleRateConverterQuality, sizeof(UInt32), &mParams.output.srcQuality); } } if (decoding) { if (mParams.output.primeMethod != -1) mSrcFile.SetConverterProperty(kAudioConverterPrimeMethod, sizeof(UInt32), &mParams.output.primeMethod); } PrintFormats(&origSrcFileLayout); // prepare I/O buffers UInt32 bytesToRead = 0x10000; UInt32 framesToRead = bytesToRead; // OK, ReadPackets will limit as appropriate ComputeReadSize(srcFormat, destFormat, bytesToRead, framesToRead); // const SInt64 totalFrames = mSrcFile.GetNumberFrames(); //#warning "GetNumberFrames() can be prohibitively slow for some formats" mReadBuffer = CABufferList::New("readbuf", srcClientFormat); mReadBuffer->AllocateBuffers(bytesToRead); mReadPtrs = CABufferList::New("readptrs", srcClientFormat); BeginConversion(); while (true) { //XThrowIf(Progress(mSrcFile.Tell(), totalFrames), userCanceledErr, "user stopped"); // this was commented out for awhile -- performance? make it optional? UInt32 nFrames = framesToRead; mReadPtrs->SetFrom(mReadBuffer); AudioBufferList *readbuf = &mReadPtrs->GetModifiableBufferList(); mSrcFile.Read(nFrames, readbuf); //printf("read %ld of %ld frames\n", nFrames, framesToRead); if (nFrames == 0) break; mDestFile.Write(nFrames, readbuf); if (ShouldTerminateConversion()) break; } if (decoding) { // fix up the destination file's length if necessary and possible SInt64 nframes = mSrcFile.GetNumberFrames(); if (nframes != 0) { // only shorten, don't try to lengthen nframes = SInt64(ceil(nframes * destFormat.mSampleRate / srcFormat.mSampleRate)); if (nframes < mDestFile.GetNumberFrames()) { mDestFile.SetNumberFrames(nframes); } } } EndConversion(); } catch (...) 
{ delete mReadBuffer; delete mReadPtrs; delete writeBuffer; delete writePtrs; if (!createdOutputFile) PrintFormats(&origSrcFileLayout); try { mSrcFile.Close(); } catch (...) { } try { mDestFile.Close(); } catch (...) { } if (createdOutputFile) unlink(mOutName); throw; } delete mReadBuffer; delete mReadPtrs; delete writeBuffer; delete writePtrs; mSrcFile.Close(); mDestFile.Close(); if (TaggedEncodingToCAF()) WriteCAFInfo(); if (mParams.flags & kOpt_Verbose) { // must close to flush encoder; GetNumberFrames() not necessarily valid until afterwards but then // the file is closed CAAudioFile temp; FSRef destFSRef; if (FSPathMakeRef((UInt8 *)mOutName, &destFSRef, NULL) == noErr) { temp.Open(destFSRef); printf("Output file: %s, %qd frames\n", basename(mOutName), temp.GetNumberFrames()); } } }
void CAChannelMappingPlayer::SetupChannelMapping() { delete mMapper; mMapper = NULL; const CAStreamBasicDescription &fileFormat = GetFile().GetClientDataFormat(); CAStreamBasicDescription deviceFormat; UInt32 propertySize = sizeof(AudioStreamBasicDescription); XThrowIfError(AudioUnitGetProperty( GetOutputUnit(), kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, (void *)&deviceFormat, &propertySize), "get output device's format"); #if VERBOSE printf("CAChannelMappingPlayer::SetupChannelMapping: %ld-ch file, %ld-ch device\n", fileFormat.mChannelsPerFrame, deviceFormat.mChannelsPerFrame); #endif if (fileFormat.mChannelsPerFrame <= deviceFormat.mChannelsPerFrame) { // no mapping needed, use output unit's default behavior // (default stereo pair and speaker config from AMS) #if VERBOSE printf(" using output unit's channel mapping\n"); #endif CAAudioFilePlayer::SetupChannelMapping(); } else { // fewer device than file channels, mapping needed CAAudioChannelLayout fileLayout, deviceLayout; #if VERBOSE printf(" using our own channel mapping\n"); #endif deviceFormat.mSampleRate = fileFormat.mSampleRate; deviceFormat.SetCanonical(deviceFormat.mChannelsPerFrame, false); // force deinterleaved fileLayout = GetFile().GetFileChannelLayout(); UInt32 layoutSize; Boolean writable; OSStatus err = AudioUnitGetPropertyInfo( GetOutputUnit(), kAudioUnitProperty_AudioChannelLayout, kAudioUnitScope_Input, 0, &layoutSize, &writable); if (!err) { char *buf = (char *)malloc(layoutSize); err = AudioUnitGetProperty( GetOutputUnit(), kAudioUnitProperty_AudioChannelLayout, kAudioUnitScope_Input, 0, buf, &layoutSize); deviceLayout = CAAudioChannelLayout(reinterpret_cast<AudioChannelLayout *>(buf)); free(buf); } mMapper = new CAChannelMapper(fileFormat, deviceFormat, &fileLayout, &deviceLayout); // give the output unit the same number of channels as in the device, // since we'll be doing the mapping ourselves XThrowIfError(AudioUnitSetProperty( GetOutputUnit(), 
kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, (void *)&deviceFormat, sizeof(AudioStreamBasicDescription)), "set audio output format"); XThrowIfError(mMapper->OpenMixer(fileFormat.mSampleRate), "open mixer"); XThrowIfError(mMapper->ConfigureDownmix(), "configure downmix"); AudioUnitConnection conn; conn.sourceAudioUnit = mMapper->GetMixer(); conn.sourceOutputNumber = 0; conn.destInputNumber = 0; XThrowIfError(AudioUnitSetProperty( GetOutputUnit(), kAudioUnitProperty_MakeConnection, kAudioUnitScope_Global, 0, (void *)&conn, sizeof(AudioUnitConnection)), "connect mixer to output unit"); AURenderCallbackStruct input; input.inputProc = InputProc; input.inputProcRefCon = this; XThrowIfError(AudioUnitSetProperty( conn.sourceAudioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Global, 0, &input, sizeof(input)), "connect input proc to mixer"); // provide NO channel layout // mReadBuf = CABufferList::New("", fileFormat); // mReadBuf->AllocateBuffers( } }