Code example #1
File: CAAudioFile.cpp  Project: gesius/AudioComplete
/*
	Example: AAC, 1024 frames/packet, 2112 frame offset

                                           2112
                                             |
    Absolute frames:  0       1024      2048 |    3072
                      +---------+---------+--|------+---------+---------+
    Packets:          |    0    |    1    |  | 2    |    3    |    4    |
                      +---------+---------+--|------+---------+---------+
    Client frames:  -2112   -1088       -64  |     960						SeekToFrame, TellFrame
                                             |
                                             0

	*   Offset between absolute and client frames is mFrame0Offset.
	*** mFrameMark is in client frames ***

	Examples:
		clientFrame					0		960		1000	1024
		absoluteFrame				2112	3072	3112	3136
		packet						0		0		0		1
		tempFrameMark*				-2112	-2112	-2112	-1088
		mFramesToSkipFollowingSeek	2112	3072	3112	2112
*/
void	CAAudioFile::Seek(SInt64 clientFrame)
{
	if (clientFrame == mFrameMark)
		return; // already there! don't reset converter

	//SInt64 absoluteFrame = clientFrame + mFrame0Offset;
	XThrowIf(mMode != kReading || clientFrame < 0 || !mClientDataFormat.IsPCM(), kExtAudioFileError_InvalidSeek, "seek to frame in audio file");

#if VERBOSE_IO
	SInt64 prevFrameMark = mFrameMark;
#endif

	SInt64 packet;
	packet = FrameToPacket(clientFrame);
	if (packet < 0)
		packet = 0;
	SeekToPacket(packet);
	// this will have backed up mFrameMark to match the beginning of the packet
	mFramesToSkipFollowingSeek = std::max(UInt32(clientFrame - mFrameMark), UInt32(0));
	mFrameMark = clientFrame;

#if VERBOSE_IO
	printf("CAAudioFile::SeekToFrame: frame %qd (from %qd), packet %qd, skip %ld frames\n", mFrameMark, prevFrameMark, packet, mFramesToSkipFollowingSeek);
#endif
}
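
A quick, standalone arithmetic sketch (not part of CAAudioFile) that reproduces the "Examples" table in the comment above. It assumes FrameToPacket simply divides the client frame by the packet size, which is what the table implies for this AAC case (1024 frames/packet, 2112-frame offset).

#include <cstdint>
#include <cstdio>

int main()
{
	const int64_t kFramesPerPacket = 1024;
	const int64_t kFrame0Offset    = 2112;	// priming frames before client frame 0

	const int64_t clientFrames[] = { 0, 960, 1000, 1024 };
	for (int64_t clientFrame : clientFrames) {
		int64_t absoluteFrame = clientFrame + kFrame0Offset;
		int64_t packet = clientFrame / kFramesPerPacket;				// assumed FrameToPacket behavior
		int64_t tempFrameMark = packet * kFramesPerPacket - kFrame0Offset;	// client frame at the packet start
		int64_t framesToSkip = clientFrame - tempFrameMark;				// mFramesToSkipFollowingSeek
		printf("client %4lld -> absolute %4lld, packet %lld, skip %lld\n",
			(long long)clientFrame, (long long)absoluteFrame, (long long)packet, (long long)framesToSkip);
	}
	return 0;
}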
Code example #2
File: CAAudioFile.cpp  Project: gesius/AudioComplete
// _______________________________________________________________________________________
//
void	CAAudioFile::Write(UInt32 numPackets, const AudioBufferList *data)
{
	if (mIOBufferList.mBuffers[0].mData == NULL) {
#if DEBUG
		printf("warning: CAAudioFile::AllocateBuffers called from WritePackets\n");
#endif
		AllocateBuffers();
	}

	if (mMode == kPreparingToWrite)
		mMode = kWriting;
	else
		XThrowIf(mMode != kWriting, kExtAudioFileError_InvalidOperationOrder, "can't write to this file");
	if (mConverter != NULL) {
		mWritePackets = numPackets;
		mWriteBufferList->SetFrom(data);
		WritePacketsFromCallback(WriteInputProc, this);
	} else {
		StartTiming(this, write);
		XThrowIfError(AudioFileWritePackets(mAudioFile, mUseCache, data->mBuffers[0].mDataByteSize,
						NULL, mPacketMark, &numPackets, data->mBuffers[0].mData),
						"write audio file");
		ElapsedTime(this, write, mTicksInIO);
#if VERBOSE_IO
		printf("CAAudioFile::WritePackets: wrote %ld packets at %qd, %ld bytes\n", numPackets, mPacketMark, data->mBuffers[0].mDataByteSize);
#endif
		//mNumberPackets =
		mPacketMark += numPackets;
		if (mFileDataFormat.mFramesPerPacket > 0)
			mFrameMark += numPackets * mFileDataFormat.mFramesPerPacket;
		// else: shouldn't happen since we're only called when there's no converter
	}
}
Code example #3
void	CAAudioFileConverter::OpenOutputFile(const CAStreamBasicDescription &srcFormat, const CAStreamBasicDescription &destFormat, FSRef &destFSRef, CAAudioChannelLayout &destFileLayout)
{
	const ConversionParameters &params = mParams;
	
	// output file
	if (params.output.filePath == NULL) {
		GenerateOutputFileName(params.input.filePath, srcFormat,
					destFormat, params.output.fileType, mOutName);
	} else
		strcpy(mOutName, params.output.filePath);
	
	// deal with pre-existing output file
	if (FSPathMakeRef((UInt8 *)mOutName, &destFSRef, NULL) == noErr) {
		XThrowIf(!(params.flags & kOpt_OverwriteOutputFile), 1, "overwrite output file");
			// not allowed to overwrite
		// output file exists - delete it
		XThrowIfError(FSDeleteObject(&destFSRef), "delete output file");
	}
	// get FSRef/CFStringRef for output file
	FSRef outFolderFSRef;
	CFStringRef outFileName;
	XThrowIfError(PosixPathToParentFSRefAndName(mOutName, outFolderFSRef, outFileName), "locate output audio file");
	
	// create the output file
	mDestFile.CreateNew(outFolderFSRef, outFileName, params.output.fileType, destFormat, &destFileLayout.Layout());
	CFRelease(outFileName);
}
Code example #4
File: CAAudioFile.cpp  Project: gesius/AudioComplete
// _______________________________________________________________________________________
//
void	CAAudioFile::Open(const FSRef &fsref)
{
	LOG_FUNCTION("CAAudioFile::Open", "%p", this);
	XThrowIf(mMode != kClosed, kExtAudioFileError_InvalidOperationOrder, "file already open");
	mFSRef = fsref;
	XThrowIfError(AudioFileOpen(&mFSRef, fsRdPerm, 0, &mAudioFile), "open audio file");
	mOwnOpenFile = true;
	mMode = kReading;
	GetExistingFileInfo();
}
Code example #5
File: CAAudioFile.cpp  Project: gesius/AudioComplete
// _______________________________________________________________________________________
//
void	CAAudioFile::Wrap(AudioFileID fileID, bool forWriting)
{
	LOG_FUNCTION("CAAudioFile::Wrap", "%p", this);
	XThrowIf(mMode != kClosed, kExtAudioFileError_InvalidOperationOrder, "file already open");

	mAudioFile = fileID;
	mOwnOpenFile = false;
	mMode = forWriting ? kPreparingToWrite : kReading;
	GetExistingFileInfo();
	if (forWriting)
		FileFormatChanged();
}
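
A hypothetical caller sketch for Wrap (the function name and URL handling below are illustrative, not from the project): the caller opens the AudioFileID itself, for example with AudioFileOpenURL, and hands it to CAAudioFile.

#include <AudioToolbox/AudioToolbox.h>
#include "CAAudioFile.h"

void WrapExistingFile(CFURLRef url)
{
	AudioFileID fileID = NULL;
	if (AudioFileOpenURL(url, kAudioFileReadPermission, 0, &fileID) != noErr)
		return;

	CAAudioFile caFile;
	caFile.Wrap(fileID, /* forWriting = */ false);
	// ... read via caFile ...

	// assumption: since Wrap sets mOwnOpenFile = false, closing the wrapped
	// AudioFileID stays with the caller (Close()/destructor behavior is not
	// shown in these excerpts)
	AudioFileClose(fileID);
}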
Code example #6
File: CAAudioFile.cpp  Project: gesius/AudioComplete
// _______________________________________________________________________________________
//
void	CAAudioFile::SetConverterChannelLayout(bool output, const CAAudioChannelLayout &layout)
{
	LOG_FUNCTION("CAAudioFile::SetConverterChannelLayout", "%p", this);
	OSStatus err;

	if (layout.IsValid()) {
#if VERBOSE_CHANNELMAP
		printf("Setting converter's %s channel layout: %s\n", output ? "output" : "input",
			CAChannelLayouts::ConstantToString(mFileChannelLayout.Tag()));
#endif
		if (output) {
			err = AudioConverterSetProperty(mConverter, kAudioConverterOutputChannelLayout,
				layout.Size(), &layout.Layout());
			XThrowIf(err && err != kAudioConverterErr_OperationNotSupported, err, "couldn't set converter's output channel layout");
		} else {
			err = AudioConverterSetProperty(mConverter, kAudioConverterInputChannelLayout,
				layout.Size(), &layout.Layout());
			XThrowIf(err && err != kAudioConverterErr_OperationNotSupported, err, "couldn't set converter's input channel layout");
		}
		if (mMode == kPreparingToWrite)
			FileFormatChanged();
	}
}
Code example #7
File: CAAudioFile.cpp  Project: gesius/AudioComplete
// _______________________________________________________________________________________
//
void	CAAudioFile::CreateNew(const FSRef &parentDir, CFStringRef filename, AudioFileTypeID filetype, const AudioStreamBasicDescription &dataFormat, const AudioChannelLayout *layout)
{
	LOG_FUNCTION("CAAudioFile::CreateNew", "%p", this);
	XThrowIf(mMode != kClosed, kExtAudioFileError_InvalidOperationOrder, "file already open");

	mFileDataFormat = dataFormat;
	if (layout) {
		mFileChannelLayout = layout;
#if VERBOSE_CHANNELMAP
		printf("PrepareNew passed channel layout: %s\n", CAChannelLayouts::ConstantToString(mFileChannelLayout.Tag()));
#endif
	}
	mMode = kPreparingToCreate;
	FileFormatChanged(&parentDir, filename, filetype);
}
Code example #8
File: CAAudioFile.cpp  Project: gesius/AudioComplete
// _______________________________________________________________________________________
//
void	CAAudioFile::WritePacketsFromCallback(
								AudioConverterComplexInputDataProc	inInputDataProc,
								void *								inInputDataProcUserData)
{
	while (true) {
		// keep writing until we exhaust the input (temporary stop), or produce no output (EOF)
		UInt32 numEncodedPackets = mIOBufferSizePackets;
		mIOBufferList.mBuffers[0].mDataByteSize = mIOBufferSizeBytes;
#if CAAUDIOFILE_PROFILE
		mInConverter = true;
#endif
		StartTiming(this, fill);
		OSStatus err = AudioConverterFillComplexBuffer(mConverter, inInputDataProc, inInputDataProcUserData,
					&numEncodedPackets, &mIOBufferList, mPacketDescs);
		ElapsedTime(this, fill, mTicksInConverter);
#if CAAUDIOFILE_PROFILE
		mInConverter = false;
#endif
		XThrowIf(err != 0 && err != kNoMoreInputRightNow, err, "convert audio packets (write)");
		if (numEncodedPackets == 0)
			break;
		Byte *buf = (Byte *)mIOBufferList.mBuffers[0].mData;
#if VERBOSE_IO
		printf("CAAudioFile::WritePacketsFromCallback: wrote %ld packets, %ld bytes\n", numEncodedPackets, mIOBufferList.mBuffers[0].mDataByteSize);
		if (mPacketDescs) {
			for (UInt32 i = 0; i < numEncodedPackets; ++i) {
				printf("  write packet %qd : offset %qd, length %ld\n", mPacketMark + i, mPacketDescs[i].mStartOffset, mPacketDescs[i].mDataByteSize);
#if VERBOSE_IO >= 2
				hexdump(buf + mPacketDescs[i].mStartOffset, mPacketDescs[i].mDataByteSize);
#endif
			}
		}
#endif
		StartTiming(this, write);
		XThrowIfError(AudioFileWritePackets(mAudioFile, mUseCache, mIOBufferList.mBuffers[0].mDataByteSize, mPacketDescs, mPacketMark, &numEncodedPackets, buf), "write audio file");
		ElapsedTime(this, write, mTicksInIO);
		mPacketMark += numEncodedPackets;
		//mNumberPackets += numEncodedPackets;
		if (mFileDataFormat.mFramesPerPacket > 0)
			mFrameMark += numEncodedPackets * mFileDataFormat.mFramesPerPacket;
		else {
			for (UInt32 i = 0; i < numEncodedPackets; ++i)
				mFrameMark += mPacketDescs[i].mVariableFramesInPacket;
		}
		if (err == kNoMoreInputRightNow)
			break;
	}
}
Code example #9
File: CAAudioFile.cpp  Project: gesius/AudioComplete
void	CAAudioFile::SeekToPacket(SInt64 packetNumber)
{
#if VERBOSE_IO
	printf("CAAudioFile::SeekToPacket: %qd\n", packetNumber);
#endif
	XThrowIf(mMode != kReading || packetNumber < 0 /*|| packetNumber >= mNumberPackets*/ , kExtAudioFileError_InvalidSeek, "seek to packet in audio file");
	if (mPacketMark == packetNumber)
		return; // already there! don't reset converter
	mPacketMark = packetNumber;

	mFrameMark = PacketToFrame(packetNumber) - mFrame0Offset;
	mFramesToSkipFollowingSeek = 0;
	if (mConverter)
		// must reset -- if we reached the end of the stream, the converter will no longer work otherwise
		AudioConverterReset(mConverter);
}
Code example #10
File: CAAudioFile.cpp  Project: gesius/AudioComplete
// _______________________________________________________________________________________
//	Allocates: mIOBufferList, mIOBufferSizePackets, mPacketDescs
//	Dependent on: mFileMaxPacketSize, mIOBufferSizeBytes
void	CAAudioFile::AllocateBuffers(bool okToFail)
{
	LOG_FUNCTION("CAAudioFile::AllocateBuffers", "%p", this);
	if (mFileMaxPacketSize == 0) {
		if (okToFail)
			return;
		XThrowIf(true, kExtAudioFileError_MaxPacketSizeUnknown, "file's maximum packet size is 0");
	}
	UInt32 bufferSizeBytes = mIOBufferSizeBytes = std::max(mIOBufferSizeBytes, mFileMaxPacketSize);
		// must be big enough for at least one maximum size packet

	if (mIOBufferList.mBuffers[0].mDataByteSize != bufferSizeBytes) {
		mIOBufferList.mNumberBuffers = 1;
		mIOBufferList.mBuffers[0].mNumberChannels = mFileDataFormat.mChannelsPerFrame;
		if (!mClientOwnsIOBuffer) {
			//printf("reallocating I/O buffer\n");
			delete[] (Byte *)mIOBufferList.mBuffers[0].mData;
			mIOBufferList.mBuffers[0].mData = new Byte[bufferSizeBytes];
		}
		mIOBufferList.mBuffers[0].mDataByteSize = bufferSizeBytes;
		mIOBufferSizePackets = bufferSizeBytes / mFileMaxPacketSize;
	}

	UInt32 propertySize = sizeof(UInt32);
	UInt32 externallyFramed;
	XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_FormatIsExternallyFramed,
			sizeof(AudioStreamBasicDescription), &mFileDataFormat, &propertySize, &externallyFramed),
			"is format externally framed");
	if (mNumPacketDescs != (externallyFramed ? mIOBufferSizePackets : 0)) {
		delete[] mPacketDescs;
		mPacketDescs = NULL;
		mNumPacketDescs = 0;

		if (externallyFramed) {
			//printf("reallocating packet descs\n");
			mPacketDescs = new AudioStreamPacketDescription[mIOBufferSizePackets];
			mNumPacketDescs = mIOBufferSizePackets;
		}
	}
}
Code example #11
File: CAAudioFile.cpp  Project: gesius/AudioComplete
// _______________________________________________________________________________________
//
void	CAAudioFile::Read(UInt32 &ioNumPackets, AudioBufferList *ioData)
			// May read fewer packets than requested if:
			//		buffer is not big enough
			//		file does not contain that many more packets
			// Note that eofErr is not fatal, just results in 0 packets returned
			// ioData's buffer sizes may be shortened
{
	XThrowIf(mClientMaxPacketSize == 0, kExtAudioFileError_MaxPacketSizeUnknown, "client maximum packet size is 0");
	if (mIOBufferList.mBuffers[0].mData == NULL) {
#if DEBUG
		printf("warning: CAAudioFile::AllocateBuffers called from ReadPackets\n");
#endif
		AllocateBuffers();
	}
	UInt32 bufferSizeBytes = ioData->mBuffers[0].mDataByteSize;
	UInt32 maxNumPackets = bufferSizeBytes / mClientMaxPacketSize;
	// older versions of AudioConverterFillComplexBuffer don't do this, so do our own sanity check
	UInt32 nPackets = std::min(ioNumPackets, maxNumPackets);

	mMaxPacketsToRead = ~0UL;

	if (mClientDataFormat.mFramesPerPacket == 1) {  // PCM or equivalent
		while (mFramesToSkipFollowingSeek > 0) {
			UInt32 skipFrames = std::min(mFramesToSkipFollowingSeek, maxNumPackets);
			UInt32 framesPerPacket;
			if ((framesPerPacket=mFileDataFormat.mFramesPerPacket) > 0)
				mMaxPacketsToRead = (skipFrames + framesPerPacket - 1) / framesPerPacket;

			if (mConverter == NULL) {
				XThrowIfError(ReadInputProc(NULL, &skipFrames, ioData, NULL, this), "read audio file");
			} else {
#if CAAUDIOFILE_PROFILE
				mInConverter = true;
#endif
				StartTiming(this, fill);
				XThrowIfError(AudioConverterFillComplexBuffer(mConverter, ReadInputProc, this, &skipFrames, ioData, NULL), "convert audio packets (pcm read)");
				ElapsedTime(this, fill, mTicksInConverter);
#if CAAUDIOFILE_PROFILE
				mInConverter = false;
#endif
			}
			if (skipFrames == 0) {	// hit EOF
				ioNumPackets = 0;
				return;
			}
			mFrameMark += skipFrames;
#if VERBOSE_IO
			printf("CAAudioFile::ReadPackets: skipped %ld frames\n", skipFrames);
#endif

			mFramesToSkipFollowingSeek -= skipFrames;

			// restore mDataByteSize
			for (int i = ioData->mNumberBuffers; --i >= 0 ; )
				ioData->mBuffers[i].mDataByteSize = bufferSizeBytes;
		}
	}

	if (mFileDataFormat.mFramesPerPacket > 0)
		// don't read more packets than we are being asked to produce
		mMaxPacketsToRead = nPackets / mFileDataFormat.mFramesPerPacket + 1;
	if (mConverter == NULL) {
		XThrowIfError(ReadInputProc(NULL, &nPackets, ioData, NULL, this), "read audio file");
	} else {
#if CAAUDIOFILE_PROFILE
		mInConverter = true;
#endif
		StartTiming(this, fill);
		XThrowIfError(AudioConverterFillComplexBuffer(mConverter, ReadInputProc, this, &nPackets, ioData, NULL), "convert audio packets (read)");
		ElapsedTime(this, fill, mTicksInConverter);
#if CAAUDIOFILE_PROFILE
		mInConverter = false;
#endif
	}
	if (mClientDataFormat.mFramesPerPacket == 1)
		mFrameMark += nPackets;

	ioNumPackets = nPackets;
}
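
For context, a minimal hypothetical read loop built only from calls that appear in these excerpts (Open, GetFileDataFormat, SetClientFormat, Read, Close), with CABufferList handling mirroring code example #13. The function name and buffer size are made up, and the usual PublicUtility headers are assumed.

#include "CAAudioFile.h"
#include "CABufferList.h"

void DumpFrameCount(const FSRef &fsref)
{
	const UInt32 kFramesPerRead = 4096;

	CAAudioFile file;
	file.Open(fsref);

	// ask for deinterleaved canonical PCM with the file's channel count
	CAStreamBasicDescription clientFormat;
	clientFormat.mSampleRate = file.GetFileDataFormat().mSampleRate;
	clientFormat.SetCanonical(file.GetFileDataFormat().mChannelsPerFrame, false);
	file.SetClientFormat(clientFormat, NULL);

	CABufferList *buf = CABufferList::New("readbuf", clientFormat);
	buf->AllocateBuffers(kFramesPerRead * sizeof(Float32));	// bytes per deinterleaved buffer, as in example #13
	CABufferList *ptrs = CABufferList::New("readptrs", clientFormat);

	SInt64 totalFrames = 0;
	while (true) {
		UInt32 nFrames = kFramesPerRead;
		ptrs->SetFrom(buf);
		AudioBufferList &abl = ptrs->GetModifiableBufferList();
		file.Read(nFrames, &abl);	// may return fewer frames; 0 means EOF
		if (nFrames == 0)
			break;
		totalFrames += nFrames;
	}
	printf("%qd frames read\n", totalFrames);

	delete buf;
	delete ptrs;
	file.Close();
}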
Code example #12
File: CAAudioFile.cpp  Project: gesius/AudioComplete
// _______________________________________________________________________________________
//
void	CAAudioFile::SetClientFormat(const CAStreamBasicDescription &dataFormat, const CAAudioChannelLayout *layout)
{
	LOG_FUNCTION("CAAudioFile::SetClientFormat", "%p", this);
	XThrowIf(!dataFormat.IsPCM(), kExtAudioFileError_NonPCMClientFormat, "non-PCM client format on audio file");

	bool dataFormatChanging = (mClientDataFormat.mFormatID == 0 || mClientDataFormat != dataFormat);

	if (dataFormatChanging) {
		CloseConverter();
		if (mWriteBufferList) {
			delete mWriteBufferList;
			mWriteBufferList = NULL;
		}
		mClientDataFormat = dataFormat;
	}

	if (layout && layout->IsValid()) {
		XThrowIf(layout->NumberChannels() != mClientDataFormat.NumberChannels(), kExtAudioFileError_InvalidChannelMap, "inappropriate channel map");
		mClientChannelLayout = *layout;
	}

	bool differentLayouts;
	if (mClientChannelLayout.IsValid()) {
		if (mFileChannelLayout.IsValid()) {
			differentLayouts = mClientChannelLayout.Tag() != mFileChannelLayout.Tag();
#if VERBOSE_CHANNELMAP
			printf("two valid layouts, %s\n", differentLayouts ? "different" : "same");
#endif
		} else {
			differentLayouts = false;
#if VERBOSE_CHANNELMAP
			printf("valid client layout, unknown file layout\n");
#endif
		}
	} else {
		differentLayouts = false;
#if VERBOSE_CHANNELMAP
		if (mFileChannelLayout.IsValid())
			printf("valid file layout, unknown client layout\n");
		else
			printf("two invalid layouts\n");
#endif
	}

	if (mClientDataFormat != mFileDataFormat || differentLayouts) {
		// We need an AudioConverter.
		if (mMode == kReading) {
			// file -> client (decode)
//mFileDataFormat.PrintFormat(  stdout, "", "File:   ");
//mClientDataFormat.PrintFormat(stdout, "", "Client: ");

			if (mConverter == NULL)
				XThrowIfError(AudioConverterNew(&mFileDataFormat, &mClientDataFormat, &mConverter),
				"create audio converter");

#if VERBOSE_CONVERTER
			printf("CAAudioFile %p -- created converter\n", this);
			CAShow(mConverter);
#endif
			// set the magic cookie, if any (for decode)
			if (mMagicCookie)
				SetConverterProperty(kAudioConverterDecompressionMagicCookie, mMagicCookieSize, mMagicCookie, mFileDataFormat.IsPCM());
					// we get cookies from some AIFFs but the converter barfs on them,
					// so we set canFail to true for PCM

			SetConverterChannelLayout(false, mFileChannelLayout);
			SetConverterChannelLayout(true, mClientChannelLayout);

			// propagate leading/trailing frame counts
			if (mFileDataFormat.mBitsPerChannel == 0) {
				UInt32 propertySize;
				OSStatus err;
				AudioFilePacketTableInfo pti;
				propertySize = sizeof(pti);
				err = AudioFileGetProperty(mAudioFile, kAudioFilePropertyPacketTableInfo, &propertySize, &pti);
				if (err == noErr && (pti.mPrimingFrames > 0 || pti.mRemainderFrames > 0)) {
					AudioConverterPrimeInfo primeInfo;
					primeInfo.leadingFrames = pti.mPrimingFrames;
					primeInfo.trailingFrames = pti.mRemainderFrames;
					/* ignore any error. better to play it at all than not. */
					/*err = */AudioConverterSetProperty(mConverter, kAudioConverterPrimeInfo, sizeof(primeInfo), &primeInfo);
					//XThrowIfError(err, "couldn't set prime info on converter");
				}
			}
		} else if (mMode == kPreparingToCreate || mMode == kPreparingToWrite) {
			// client -> file (encode)
			if (mConverter == NULL)
				XThrowIfError(AudioConverterNew(&mClientDataFormat, &mFileDataFormat, &mConverter), "create audio converter");
			mWriteBufferList = CABufferList::New("", mClientDataFormat);
			SetConverterChannelLayout(false, mClientChannelLayout);
			SetConverterChannelLayout(true, mFileChannelLayout);
			if (mMode == kPreparingToWrite)
				FileFormatChanged();
		} else
			XThrowIfError(kExtAudioFileError_InvalidOperationOrder, "audio file format not yet known");
	}
	UpdateClientMaxPacketSize();
}
Code example #13
void	Interleave(int nInputs, const char *infilenames[], const char *outfilename, const CAAudioChannelLayout *layout)
{
	const UInt32 kBufferSizeFrames = 0x8000;
	const UInt32 kBufferSizeBytes = kBufferSizeFrames * sizeof(Float32);
	class FileAndBuffer : public CAAudioFile {
	public:
		FileAndBuffer() : mBuf(NULL), mPtrs(NULL) { }
		~FileAndBuffer() { delete mBuf; delete mPtrs; }
		
		CABufferList *	mBuf;
		CABufferList *	mPtrs;
	};
	FileAndBuffer *infiles = new FileAndBuffer[nInputs], *file;
	FileAndBuffer outfile;
	int i;
	UInt32 outputChannels = 0;
	double sampleRate = 0.;
	UInt32 maxBitDepth = 0;
	CAStreamBasicDescription clientFormat;
	bool outFileCreated = false;
	
	try {
		// set up input files
		for (i = 0; i < nInputs; ++i) {
			file = &infiles[i];
			file->Open(infilenames[i]);
			const CAStreamBasicDescription &fmt = file->GetFileDataFormat();
			//fmt.PrintFormat(stdout, "", "input file");
			XThrowIf(fmt.mFormatID != kAudioFormatLinearPCM, -1, "input files must be PCM");
			outputChannels += fmt.mChannelsPerFrame;
			if (sampleRate == 0.)
				sampleRate = fmt.mSampleRate;
			else
				XThrowIf(fmt.mSampleRate != sampleRate, -1, "input files must have the same sample rate");
			if (fmt.mBitsPerChannel > maxBitDepth)
				maxBitDepth = fmt.mBitsPerChannel;
			clientFormat.mSampleRate = sampleRate;
			clientFormat.SetCanonical(fmt.mChannelsPerFrame, false);	// deinterleaved
			file->SetClientFormat(clientFormat, NULL);
			file->mBuf = CABufferList::New("readbuf", clientFormat);
			file->mBuf->AllocateBuffers(kBufferSizeBytes);
			file->mPtrs = CABufferList::New("readptrs", clientFormat);
			//clientFormat.PrintFormat(stdout, "", "input client");
		}
		
		if (layout != NULL) {
			if (AudioChannelLayoutTag_GetNumberOfChannels(layout->Tag()) != outputChannels) {
				fprintf(stderr, "Channel layout tag '%s' is inappropriate for %u channels of audio -- aborting\n", 
					CAChannelLayouts::ConstantToString(layout->Tag()), (unsigned)outputChannels);
				exit(2);
			}
		}

		// prepare output file format
		CAStreamBasicDescription outfmt;
		outfmt.mSampleRate = sampleRate;
		outfmt.mFormatID = kAudioFormatLinearPCM;
		outfmt.mFormatFlags = kLinearPCMFormatFlagIsBigEndian | kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
		outfmt.mBitsPerChannel = maxBitDepth;
		outfmt.mChannelsPerFrame = outputChannels;
		outfmt.mBytesPerPacket = outfmt.mBytesPerFrame = outputChannels * (maxBitDepth >> 3);
		outfmt.mFramesPerPacket = 1;
		//outfmt.PrintFormat(stdout, "", "output file");
		
		unlink(outfilename);
		FSRef parentDir;
		CFStringRef outName;
		XThrowIfError(PosixPathToParentFSRefAndName(outfilename, parentDir, outName), "Couldn't locate output directory");
		outfile.CreateNew(parentDir, outName, kAudioFileAIFFType, outfmt, layout ? &layout->Layout() : NULL);
		outFileCreated = true;
		
		// set the output file's client format and create the write buffers
		clientFormat.mSampleRate = sampleRate;
		clientFormat.SetCanonical(outputChannels, false);
		outfile.SetClientFormat(clientFormat, NULL);
		//clientFormat.PrintFormat(stdout, "", "output client");

		outfile.mPtrs = CABufferList::New("writeptrs", clientFormat);

		AudioBufferList &writebufs = outfile.mPtrs->GetModifiableBufferList();
		
		while (true) {
			UInt32 maxFramesRead = 0;
			UInt32 nframes;
			int outbuf = 0;
			for (i = 0; i < nInputs; ++i) {
				file = &infiles[i];
				file->mPtrs->SetFrom(file->mBuf);
				nframes = kBufferSizeFrames;
				AudioBufferList &readbufs = file->mPtrs->GetModifiableBufferList();
				file->Read(nframes, &readbufs);
				//CAShowAudioBufferList(&readbufs, 8, 0);
				if (nframes > maxFramesRead)
					maxFramesRead = nframes;
				if (nframes < kBufferSizeFrames)
					file->mPtrs->PadWithZeroes(kBufferSizeBytes);

				memcpy(&writebufs.mBuffers[outbuf], &readbufs.mBuffers[0], 
					readbufs.mNumberBuffers * sizeof(AudioBuffer));
				outbuf += readbufs.mNumberBuffers;
			}
			if (maxFramesRead == 0)
				break;

			if (maxFramesRead < kBufferSizeFrames)
				outfile.mPtrs->SetNumBytes(maxFramesRead * sizeof(Float32));
			//CAShowAudioBufferList(&writebufs, 8, 0);
			outfile.Write(maxFramesRead, &writebufs);
			if (maxFramesRead < kBufferSizeFrames)
				break;
		}
	}
	catch (...) {
		if (outFileCreated)
			unlink(outfilename);
		delete[] infiles;
		throw;
	}
	outfile.Close();
	// input files are closed from destructors
	delete[] infiles;
}
Code example #14
File: CAAudioFile.cpp  Project: gesius/AudioComplete
// _______________________________________________________________________________________
//
// called to create the file -- or update its format/channel layout/properties based on an encoder
// setting change
void	CAAudioFile::FileFormatChanged(const FSRef *parentDir, CFStringRef filename, AudioFileTypeID filetype)
{
	LOG_FUNCTION("CAAudioFile::FileFormatChanged", "%p", this);
	XThrowIf(mMode != kPreparingToCreate && mMode != kPreparingToWrite, kExtAudioFileError_InvalidOperationOrder, "new file not prepared");

	UInt32 propertySize;
	OSStatus err;
	AudioStreamBasicDescription saveFileDataFormat = mFileDataFormat;

#if VERBOSE_CONVERTER
	mFileDataFormat.PrintFormat(stdout, "", "Specified file data format");
#endif

	// Find out the actual format the converter will produce. This is necessary in
	// case the bitrate has forced a lower sample rate, which needs to be set correctly
	// in the stream description passed to AudioFileCreate.
	if (mConverter != NULL) {
		propertySize = sizeof(AudioStreamBasicDescription);
		Float64 origSampleRate = mFileDataFormat.mSampleRate;
		XThrowIfError(AudioConverterGetProperty(mConverter, kAudioConverterCurrentOutputStreamDescription, &propertySize, &mFileDataFormat), "get audio converter's output stream description");
		// do the same for the channel layout being output by the converter
#if VERBOSE_CONVERTER
		mFileDataFormat.PrintFormat(stdout, "", "Converter output");
#endif
		if (fiszero(mFileDataFormat.mSampleRate))
			mFileDataFormat.mSampleRate = origSampleRate;
		err = AudioConverterGetPropertyInfo(mConverter, kAudioConverterOutputChannelLayout, &propertySize, NULL);
		if (err == noErr && propertySize > 0) {
			AudioChannelLayout *layout = static_cast<AudioChannelLayout *>(malloc(propertySize));
			err = AudioConverterGetProperty(mConverter, kAudioConverterOutputChannelLayout, &propertySize, layout);
			if (err) {
				free(layout);
				XThrow(err, "couldn't get audio converter's output channel layout");
			}
			mFileChannelLayout = layout;
#if VERBOSE_CHANNELMAP
			printf("got new file's channel layout from converter: %s\n", CAChannelLayouts::ConstantToString(mFileChannelLayout.Tag()));
#endif
			free(layout);
		}
	}

	// create the output file
	if (mMode == kPreparingToCreate) {
		CAStreamBasicDescription newFileDataFormat = mFileDataFormat;
		if (fiszero(newFileDataFormat.mSampleRate))
			newFileDataFormat.mSampleRate = 44100;	// just make something up for now
#if VERBOSE_CONVERTER
		newFileDataFormat.PrintFormat(stdout, "", "Applied to new file");
#endif
		XThrowIfError(AudioFileCreate(parentDir, filename, filetype, &newFileDataFormat, 0, &mFSRef, &mAudioFile), "create audio file");
		mMode = kPreparingToWrite;
		mOwnOpenFile = true;
	} else if (saveFileDataFormat != mFileDataFormat || fnotequal(saveFileDataFormat.mSampleRate, mFileDataFormat.mSampleRate)) {
		// second check must be explicit since operator== on ASBD treats SR of zero as "don't care"
		if (fiszero(mFileDataFormat.mSampleRate))
			mFileDataFormat.mSampleRate = mClientDataFormat.mSampleRate;
#if VERBOSE_CONVERTER
		mFileDataFormat.PrintFormat(stdout, "", "Applied to new file");
#endif
		XThrowIf(fiszero(mFileDataFormat.mSampleRate), kExtAudioFileError_InvalidDataFormat, "file's sample rate is 0");
		XThrowIfError(AudioFileSetProperty(mAudioFile, kAudioFilePropertyDataFormat, sizeof(AudioStreamBasicDescription), &mFileDataFormat), "couldn't update file's data format");
	}

	UInt32 deferSizeUpdates = 1;
	err = AudioFileSetProperty(mAudioFile, kAudioFilePropertyDeferSizeUpdates, sizeof(UInt32), &deferSizeUpdates);

	if (mConverter != NULL) {
		// encoder
		// get the magic cookie, if any, from the converter
		delete[] mMagicCookie;	mMagicCookie = NULL;
		mMagicCookieSize = 0;

		err = AudioConverterGetPropertyInfo(mConverter, kAudioConverterCompressionMagicCookie, &propertySize, NULL);

		// we can get a noErr result and also a propertySize == 0
		// -- if the file format does support magic cookies, but this file doesn't have one.
		if (err == noErr && propertySize > 0) {
			mMagicCookie = new Byte[propertySize];
			XThrowIfError(AudioConverterGetProperty(mConverter, kAudioConverterCompressionMagicCookie, &propertySize, mMagicCookie), "get audio converter's magic cookie");
			mMagicCookieSize = propertySize;	// the converter lies and tells us the wrong size
			// now set the magic cookie on the output file
			UInt32 willEatTheCookie = false;
			// the converter wants to give us one; will the file take it?
			err = AudioFileGetPropertyInfo(mAudioFile, kAudioFilePropertyMagicCookieData,
					NULL, &willEatTheCookie);
			if (err == noErr && willEatTheCookie) {
#if VERBOSE_CONVERTER
				printf("Setting cookie on encoded file\n");
#endif
				XThrowIfError(AudioFileSetProperty(mAudioFile, kAudioFilePropertyMagicCookieData, mMagicCookieSize, mMagicCookie), "set audio file's magic cookie");
			}
		}

		// get maximum packet size
		propertySize = sizeof(UInt32);
		XThrowIfError(AudioConverterGetProperty(mConverter, kAudioConverterPropertyMaximumOutputPacketSize, &propertySize, &mFileMaxPacketSize), "get audio converter's maximum output packet size");

		AllocateBuffers(true /* okToFail */);
	} else {
		InitFileMaxPacketSize();
	}

	if (mFileChannelLayout.IsValid() && mFileChannelLayout.NumberChannels() > 2) {
		// don't bother tagging mono/stereo files
		UInt32 isWritable;
		err = AudioFileGetPropertyInfo(mAudioFile, kAudioFilePropertyChannelLayout, NULL, &isWritable);
		if (!err && isWritable) {
#if VERBOSE_CHANNELMAP
			printf("writing file's channel layout: %s\n", CAChannelLayouts::ConstantToString(mFileChannelLayout.Tag()));
#endif
			err = AudioFileSetProperty(mAudioFile, kAudioFilePropertyChannelLayout,
				mFileChannelLayout.Size(), &mFileChannelLayout.Layout());
			if (err)
				CAXException::Warning("could not set the file's channel layout", err);
		} else {
#if VERBOSE_CHANNELMAP
			printf("file won't accept a channel layout (write)\n");
#endif
		}
	}

	UpdateClientMaxPacketSize();	// also sets mFrame0Offset
	mPacketMark = 0;
	mFrameMark = 0;
}
Code example #15
File: CAAudioFile.cpp  Project: gesius/AudioComplete
// _______________________________________________________________________________________
//
void	CAAudioFile::SetNumberFrames(SInt64 nFrames)
{
	XThrowIf(mFileDataFormat.mFramesPerPacket != 1, kExtAudioFileError_InvalidDataFormat, "SetNumberFrames only supported for PCM");
	XThrowIfError(AudioFileSetProperty(mAudioFile, kAudioFilePropertyAudioDataPacketCount, sizeof(SInt64), &nFrames), "Couldn't set number of packets on audio file");
}
Code example #16
void	CAAudioFileConverter::ConvertFile(const ConversionParameters &_params)
{
	FSRef destFSRef;
	UInt32 propertySize;
	CAStreamBasicDescription destFormat;
	CAAudioChannelLayout origSrcFileLayout, srcFileLayout, destFileLayout;
	bool openedSourceFile = false, createdOutputFile = false;
	
	mParams = _params;
	mReadBuffer = NULL;
	mReadPtrs = NULL;
	CABufferList *writeBuffer = NULL;
	CABufferList *writePtrs = NULL;
	
	PrepareConversion();

	try {
		if (TaggedDecodingFromCAF())
			ReadCAFInfo();
		OpenInputFile();
		openedSourceFile = true;
		
		// get input file's format
		const CAStreamBasicDescription &srcFormat = mSrcFile.GetFileDataFormat();
		if (mParams.flags & kOpt_Verbose) {
			printf("Input file: %s, %qd frames\n", mParams.input.filePath ? basename(mParams.input.filePath) : "?", 
				mSrcFile.GetNumberFrames());
		}
		mSrcFormat = srcFormat;
		
		// prepare output file's format
		destFormat = mParams.output.dataFormat;

		// note: destFormat must be filled in before these flags are computed,
		// otherwise they would reflect an empty format
		bool encoding = !destFormat.IsPCM();
		bool decoding = !srcFormat.IsPCM();

		if (!encoding && destFormat.mSampleRate == 0.)
			// on encode, it's OK to have a 0 sample rate; ExtAudioFile will get the SR from the converter and set it on the file.
			// on decode or PCM->PCM, a sample rate of 0 is interpreted as using the source sample rate
			destFormat.mSampleRate = srcFormat.mSampleRate;
		
		// source channel layout
		srcFileLayout = mSrcFile.GetFileChannelLayout();
		origSrcFileLayout = srcFileLayout;
		if (mParams.input.channelLayoutTag != 0) {
			XThrowIf(AudioChannelLayoutTag_GetNumberOfChannels(mParams.input.channelLayoutTag)
				!= srcFormat.mChannelsPerFrame, -1, "input channel layout has wrong number of channels for file");
			srcFileLayout = CAAudioChannelLayout(mParams.input.channelLayoutTag);
			mSrcFile.SetFileChannelLayout(srcFileLayout);
		}
		
		// destination channel layout
		int outChannels = mParams.output.channels;
		if (mParams.output.channelLayoutTag != 0) {
			// use the one specified by caller, if any
			destFileLayout = CAAudioChannelLayout(mParams.output.channelLayoutTag);
		} else if (srcFileLayout.IsValid()) {
			// otherwise, assume the same as the source, if any
			destFileLayout = srcFileLayout;
		}
		if (destFileLayout.IsValid()) {
			// the output channel layout specifies the number of output channels
			if (outChannels != -1)
				XThrowIf((unsigned)outChannels != destFileLayout.NumberChannels(), -1,
					"output channel layout has wrong number of channels");
			else
				outChannels = destFileLayout.NumberChannels();
		}

		if (!(mParams.flags & kOpt_NoSanitizeOutputFormat)) {
			// adjust the output format's channels; output.channels overrides the channels
			if (outChannels == -1)
				outChannels = srcFormat.mChannelsPerFrame;
			if (outChannels > 0) {
				destFormat.mChannelsPerFrame = outChannels;
				destFormat.mBytesPerPacket *= outChannels;
				destFormat.mBytesPerFrame *= outChannels;
			}
		
			// use AudioFormat API to clean up the output format
			propertySize = sizeof(AudioStreamBasicDescription);
			XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &propertySize, &destFormat),
					"get destination format info");
		}
		OpenOutputFile(srcFormat, destFormat, destFSRef, destFileLayout);
		createdOutputFile = true;
		mDestFormat = destFormat;
		
		// set up client formats
		CAStreamBasicDescription srcClientFormat, destClientFormat;
		{
			CAAudioChannelLayout srcClientLayout, destClientLayout;
			
			if (encoding) {
				if (decoding) {
					// transcoding
//					XThrowIf(encoding && decoding, -1, "transcoding not currently supported");
					
					if (srcFormat.mChannelsPerFrame > 2 || destFormat.mChannelsPerFrame > 2)
						CAXException::Warning("Transcoding multichannel audio may not handle channel layouts correctly", 0);
					srcClientFormat.SetCanonical(std::min(srcFormat.mChannelsPerFrame, destFormat.mChannelsPerFrame), true);
					srcClientFormat.mSampleRate = std::max(srcFormat.mSampleRate, destFormat.mSampleRate);
					mSrcFile.SetClientFormat(srcClientFormat, NULL);
					
					destClientFormat = srcClientFormat;
				} else {
					// encoding
					srcClientFormat = srcFormat;
					destClientFormat = srcFormat;
				}
				// by here, destClientFormat will have a valid sample rate
				destClientLayout = srcFileLayout.IsValid() ? srcFileLayout : destFileLayout;

				mDestFile.SetClientFormat(destClientFormat, &destClientLayout);
			} else {
				// decoding or PCM->PCM
				if (destFormat.mSampleRate == 0.)
					destFormat.mSampleRate = srcFormat.mSampleRate;
		
				destClientFormat = destFormat;
				srcClientFormat = destFormat;
				srcClientLayout = destFileLayout;
				
				mSrcFile.SetClientFormat(srcClientFormat, &srcClientLayout);
			}
		}
		
		XThrowIf(srcClientFormat.mBytesPerPacket == 0, -1, "source client format not PCM"); 
		XThrowIf(destClientFormat.mBytesPerPacket == 0, -1, "dest client format not PCM"); 		
		if (encoding) {
			// set the bitrate
			if (mParams.output.bitRate != -1) {
				if (mParams.flags & kOpt_Verbose)
					printf("bitrate = %ld\n", mParams.output.bitRate);
				mDestFile.SetConverterProperty(kAudioConverterEncodeBitRate, sizeof(UInt32), &mParams.output.bitRate);
			}

			// set the codec quality
			if (mParams.output.codecQuality != -1) {
				if (mParams.flags & kOpt_Verbose)
					printf("codec quality = %ld\n", mParams.output.codecQuality);
				mDestFile.SetConverterProperty(kAudioConverterCodecQuality, sizeof(UInt32), &mParams.output.codecQuality);
			}

			// set the bitrate strategy -- called bitrate format in the codecs since it had already shipped
			if (mParams.output.strategy != -1) {
				if (mParams.flags & kOpt_Verbose)
					printf("strategy = %ld\n", mParams.output.strategy);
				mDestFile.SetConverterProperty(kAudioCodecBitRateFormat, sizeof(UInt32), &mParams.output.strategy);
			}
		}
		// set the SRC quality
		if (mParams.output.srcQuality != -1) {
			if (srcFormat.mSampleRate != 0. && destFormat.mSampleRate != 0. && srcFormat.mSampleRate != destFormat.mSampleRate) {
				if (mParams.flags & kOpt_Verbose)
					printf("SRC quality = %ld\n", mParams.output.srcQuality);
				if (encoding)
					mDestFile.SetConverterProperty(kAudioConverterSampleRateConverterQuality, sizeof(UInt32), &mParams.output.srcQuality);
				else
					mSrcFile.SetConverterProperty(kAudioConverterSampleRateConverterQuality, sizeof(UInt32), &mParams.output.srcQuality);
			}
		}
		if (decoding) {
			if (mParams.output.primeMethod != -1)
				mSrcFile.SetConverterProperty(kAudioConverterPrimeMethod, sizeof(UInt32), &mParams.output.primeMethod);
		}

		PrintFormats(&origSrcFileLayout);

		// prepare I/O buffers
		UInt32 bytesToRead = 0x10000;
		UInt32 framesToRead = bytesToRead;	// OK, ReadPackets will limit as appropriate
		ComputeReadSize(srcFormat, destFormat, bytesToRead, framesToRead);

//		const SInt64 totalFrames = mSrcFile.GetNumberFrames();
//#warning "GetNumberFrames() can be prohibitively slow for some formats"
		
		mReadBuffer = CABufferList::New("readbuf", srcClientFormat);
		mReadBuffer->AllocateBuffers(bytesToRead);
		mReadPtrs = CABufferList::New("readptrs", srcClientFormat);
		
		BeginConversion();
		
		while (true) {
			//XThrowIf(Progress(mSrcFile.Tell(), totalFrames), userCanceledErr, "user stopped");
				// this was commented out for awhile -- performance? make it optional?
			UInt32 nFrames = framesToRead;
			mReadPtrs->SetFrom(mReadBuffer);
			AudioBufferList *readbuf = &mReadPtrs->GetModifiableBufferList();
			
			mSrcFile.Read(nFrames, readbuf);
			//printf("read %ld of %ld frames\n", nFrames, framesToRead);
			if (nFrames == 0)
				break;

			mDestFile.Write(nFrames, readbuf);
			if (ShouldTerminateConversion())
				break;
		}
		
		if (decoding) {
			// fix up the destination file's length if necessary and possible
			SInt64 nframes = mSrcFile.GetNumberFrames();
			if (nframes != 0) {
				// only shorten, don't try to lengthen
				nframes = SInt64(ceil(nframes * destFormat.mSampleRate / srcFormat.mSampleRate));
				if (nframes < mDestFile.GetNumberFrames()) {
					mDestFile.SetNumberFrames(nframes);
				}
			}
		}
		EndConversion();
	}
	catch (...) {
		delete mReadBuffer;
		delete mReadPtrs;
		delete writeBuffer;
		delete writePtrs;
		if (!createdOutputFile)
			PrintFormats(&origSrcFileLayout);
		try { mSrcFile.Close(); } catch (...) { }
		try { mDestFile.Close(); } catch (...) { }
		if (createdOutputFile)
			unlink(mOutName);
		throw;
	}
	delete mReadBuffer;
	delete mReadPtrs;
	delete writeBuffer;
	delete writePtrs;
	mSrcFile.Close();
	mDestFile.Close();
	if (TaggedEncodingToCAF())
		WriteCAFInfo();
	
	if (mParams.flags & kOpt_Verbose) {
		// must close to flush encoder; GetNumberFrames() not necessarily valid until afterwards but then
		// the file is closed
		CAAudioFile temp;
		FSRef destFSRef;
		if (FSPathMakeRef((UInt8 *)mOutName, &destFSRef, NULL) == noErr) {
			temp.Open(destFSRef);
			printf("Output file: %s, %qd frames\n", basename(mOutName), temp.GetNumberFrames());
		}
	}
}
Code example #17
CAAudioFileRecorder::CAAudioFileRecorder(int nBuffers, UInt32 bufferSizeFrames) :
	CAAudioFileWriter(nBuffers, bufferSizeFrames),
	mInputUnit(NULL),
	mAudioInputPtrs(NULL)
{
	// open input unit
	Component comp;
	ComponentDescription desc;
	
	desc.componentType = kAudioUnitType_Output;
	desc.componentSubType = kAudioUnitSubType_HALOutput;
	desc.componentManufacturer = kAudioUnitManufacturer_Apple;
	desc.componentFlags = 0;
	desc.componentFlagsMask = 0;
	comp = FindNextComponent(NULL, &desc);
	XThrowIf(comp == NULL, -1, "find audio input unit");
	XThrowIfError(OpenAComponent(comp, &mInputUnit), "open audio input unit");

	UInt32 enableIO;
	UInt32 propSize;
	
	enableIO = 0;
	XThrowIfError(AudioUnitSetProperty(mInputUnit,
		kAudioOutputUnitProperty_EnableIO,
		kAudioUnitScope_Output,
		0,
		&enableIO,
		sizeof(enableIO)), "failed to disable output");

	enableIO = 1;
	XThrowIfError(AudioUnitSetProperty(mInputUnit,
		kAudioOutputUnitProperty_EnableIO,
		kAudioUnitScope_Input,
		1,
		&enableIO,
		sizeof(enableIO)), "failed to enable input");
	
	// select the default input device
	propSize = sizeof(AudioDeviceID);
	AudioDeviceID inputDevice;
	XThrowIfError(
		AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice, &propSize, &inputDevice),
		"failed to get default input device");
	
	XThrowIfError(
		AudioUnitSetProperty(mInputUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, 0, &inputDevice, sizeof(inputDevice)),
		"failed to select input device");
	
	// set render callback
	AURenderCallbackStruct input;
	input.inputProc = InputProc;
	input.inputProcRefCon = this;
	XThrowIfError(AudioUnitSetProperty(
								mInputUnit, 
								kAudioOutputUnitProperty_SetInputCallback, 
								kAudioUnitScope_Global,
								0,
								&input, 
								sizeof(input)), "connect input proc to output unit");

	XThrowIfError(AudioUnitInitialize(mInputUnit), "initialize audio input unit");
	
	// get the hardware format
	propSize = sizeof(mInputDataFormat);
	XThrowIfError(AudioUnitGetProperty(mInputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 1, &mInputDataFormat, &propSize), "couldn't get input unit's stream format");
}
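
The constructor registers InputProc as the input unit's callback, but the callback itself is not among these excerpts. Below is a hypothetical sketch of what it might look like, assuming InputProc is a static member with the standard AURenderCallback signature and that mAudioInputPtrs is a CABufferList; the PushBuffer call is only a placeholder for however the recorder hands data to CAAudioFileWriter.

OSStatus	CAAudioFileRecorder::InputProc(	void *						inRefCon,
											AudioUnitRenderActionFlags *ioActionFlags,
											const AudioTimeStamp *		inTimeStamp,
											UInt32						inBusNumber,
											UInt32						inNumberFrames,
											AudioBufferList *			/*ioData*/)
{
	CAAudioFileRecorder *This = static_cast<CAAudioFileRecorder *>(inRefCon);
	AudioBufferList &abl = This->mAudioInputPtrs->GetModifiableBufferList();	// assumes mAudioInputPtrs is a CABufferList

	// pull the captured audio from the input element (bus 1) of the HAL output unit
	OSStatus err = AudioUnitRender(This->mInputUnit, ioActionFlags, inTimeStamp,
									inBusNumber, inNumberFrames, &abl);
	if (err)
		return err;

	// hand the frames to the file writer -- placeholder, the real call is not shown in these excerpts:
	// This->PushBuffer(inNumberFrames, &abl);
	return noErr;
}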