Example #1 (votes: 0)
File: track.c  Project: one-k/rmov
/*
  call-seq: track_get_audio_channel_map() -> array

    Returns an array n-channels in length.
    Array contains Hashes in the form: {:assignment => :description} where
    :description is a symbol representing an audio channel description,
    e.g. :Left, :Right, :Mono.
    Returns nil when the track has no audio channel layout.
    Channels this extension cannot name are reported as :UnsupportedByRMov
    with an extra :message key explaining why.
*/
static VALUE track_get_audio_channel_map(VALUE obj)
{
  AudioChannelLayout *layout = track_get_audio_channel_layout(obj);
  if (layout == NULL) return Qnil;
  
  VALUE channels = Qnil;
  UInt32 numChannels, x, highLayoutTag;
  VALUE channel;
  char message[256];
  AudioChannelLayoutTag layoutTag = layout->mChannelLayoutTag;
  
  if (layoutTag == kAudioChannelLayoutTag_UseChannelDescriptions) {
    // the layout carries an explicit per-channel description list;
    // map each description's label to a symbol where we know the name
    numChannels = layout->mNumberChannelDescriptions;
    channels = rb_ary_new2(numChannels);
    
    // loop through all channels, adding assignment descriptions
    AudioChannelDescription desc;
    char *trackStr;
    for (x=0; x < numChannels; x++) {
      desc = layout->mChannelDescriptions[x];
      trackStr = track_str_for_AudioChannelLabel(desc.mChannelLabel);

      if (trackStr != NULL) {
        ADD_CHANNEL(channels, channel, trackStr);

      } else {
         // unsupported audio channel labels: report the raw label value
         ADD_CHANNEL(channels, channel, "UnsupportedByRMov");
         // snprintf rather than sprintf: bounds the write to message[256]
         snprintf(message, sizeof(message), "ChannelLabel unsupported by rmov: %d", (int)desc.mChannelLabel);
         rb_hash_aset(channel, ID2SYM(rb_intern("message")), rb_str_new2(message));
      }
    }
    

  } else {
    numChannels = AudioChannelLayoutTag_GetNumberOfChannels(layoutTag);
    channels = rb_ary_new2(numChannels);

    if (layoutTag == kAudioChannelLayoutTag_UseChannelBitmap) {
      // the bitmap encoding of channel assignments is not implemented;
      // emit placeholder entries so the array length is still correct
      //rb_raise(eQuickTime, "Not Implemented: kAudioChannelLayoutTag_UseChannelBitmap in track_get_audio_channel_map");
      for (x=0; x < numChannels; x++) {
        ADD_CHANNEL(channels, channel, "UnsupportedByRMov");
        rb_hash_aset(channel, ID2SYM(rb_intern("message")), rb_str_new2("UseChannelBitmap unsupported by rmov"));
      }



    } else {
      // using a standard LayoutTag: expand the tags we recognize into
      // their fixed channel orderings
      switch (layoutTag) {

        case kAudioChannelLayoutTag_Mono:
          ADD_CHANNEL(channels, channel, "Mono");
          break;
        
        case kAudioChannelLayoutTag_Stereo:
          ADD_CHANNEL(channels, channel, "Left");
          ADD_CHANNEL(channels, channel, "Right");
          break;
          
        case kAudioChannelLayoutTag_MatrixStereo:
          ADD_CHANNEL(channels, channel, "LeftTotal");
          ADD_CHANNEL(channels, channel, "RightTotal");
          break;
          
        case kAudioChannelLayoutTag_SMPTE_DTV:
          ADD_CHANNEL(channels, channel, "Left");
          ADD_CHANNEL(channels, channel, "Right");
          ADD_CHANNEL(channels, channel, "Center");
          ADD_CHANNEL(channels, channel, "LFEScreen");
          ADD_CHANNEL(channels, channel, "LeftSurround");
          ADD_CHANNEL(channels, channel, "RightSurround");
          ADD_CHANNEL(channels, channel, "LeftTotal");
          ADD_CHANNEL(channels, channel, "RightTotal");
          break;

        default:
          // unsupported tag: report its (high << 16) | channels encoding
          highLayoutTag = (layoutTag & 0xff0000) >> 16;
          // snprintf rather than sprintf: bounds the write to message[256]
          snprintf(message, sizeof(message), "layoutTag unsupported by rmov: (%dL << 16) | %d", (int)highLayoutTag, (int)numChannels);
          for (x=0; x < numChannels; x++) {
            ADD_CHANNEL(channels, channel, "UnsupportedByRMov");
            rb_hash_aset(channel, ID2SYM(rb_intern("message")), rb_str_new2(message));
          }
          //rb_raise(eQuickTime, "Unsupported ChannelLayoutTag in track_get_audio_channel_map: %d", layoutTag);
          break;
      }

    }

  }
  
  // track_get_audio_channel_layout() transfers ownership of the layout
  free(layout);
  
  return channels;
}
Example #2 (votes: 0)
// Converts mParams.input to mParams.output: opens the source file, derives
// and sanitizes the destination data format and channel layout, installs
// matching PCM client formats on both files, then streams frames from
// source to destination.  On any exception the I/O buffers are freed, both
// files are closed, and a partially written output file is unlinked before
// the exception is rethrown.
void	CAAudioFileConverter::ConvertFile(const ConversionParameters &_params)
{
	FSRef destFSRef;
	UInt32 propertySize;
	CAStreamBasicDescription destFormat;
	CAAudioChannelLayout origSrcFileLayout, srcFileLayout, destFileLayout;
	bool openedSourceFile = false, createdOutputFile = false;
	
	mParams = _params;
	mReadBuffer = NULL;
	mReadPtrs = NULL;
	CABufferList *writeBuffer = NULL;
	CABufferList *writePtrs = NULL;
	
	PrepareConversion();

	try {
		if (TaggedDecodingFromCAF())
			ReadCAFInfo();
		OpenInputFile();
		openedSourceFile = true;	// NOTE(review): set but not otherwise consulted; kept for symmetry with createdOutputFile
		
		// get input file's format
		const CAStreamBasicDescription &srcFormat = mSrcFile.GetFileDataFormat();
		if (mParams.flags & kOpt_Verbose) {
			printf("Input file: %s, %qd frames\n", mParams.input.filePath ? basename(mParams.input.filePath) : "?", 
				mSrcFile.GetNumberFrames());
		}
		mSrcFormat = srcFormat;
		
		// prepare output file's format
		// BUGFIX: destFormat must be initialized from the requested output
		// format BEFORE the encoding/decoding flags are derived from it;
		// previously 'encoding' was computed from a default-constructed
		// destFormat and was therefore meaningless.
		destFormat = mParams.output.dataFormat;

		bool encoding = !destFormat.IsPCM();
		bool decoding = !srcFormat.IsPCM();

		if (!encoding && destFormat.mSampleRate == 0.)
			// on encode, it's OK to have a 0 sample rate; ExtAudioFile will get the SR from the converter and set it on the file.
			// on decode or PCM->PCM, a sample rate of 0 is interpreted as using the source sample rate
			destFormat.mSampleRate = srcFormat.mSampleRate;
		
		// source channel layout
		srcFileLayout = mSrcFile.GetFileChannelLayout();
		origSrcFileLayout = srcFileLayout;
		if (mParams.input.channelLayoutTag != 0) {
			// caller overrides the file's layout; the override must agree
			// with the file's channel count
			XThrowIf(AudioChannelLayoutTag_GetNumberOfChannels(mParams.input.channelLayoutTag)
				!= srcFormat.mChannelsPerFrame, -1, "input channel layout has wrong number of channels for file");
			srcFileLayout = CAAudioChannelLayout(mParams.input.channelLayoutTag);
			mSrcFile.SetFileChannelLayout(srcFileLayout);
		}
		
		// destination channel layout
		int outChannels = mParams.output.channels;
		if (mParams.output.channelLayoutTag != 0) {
			// use the one specified by caller, if any
			destFileLayout = CAAudioChannelLayout(mParams.output.channelLayoutTag);
		} else if (srcFileLayout.IsValid()) {
			// otherwise, assume the same as the source, if any
			destFileLayout = srcFileLayout;
		}
		if (destFileLayout.IsValid()) {
			// the output channel layout specifies the number of output channels
			if (outChannels != -1)
				XThrowIf((unsigned)outChannels != destFileLayout.NumberChannels(), -1,
					"output channel layout has wrong number of channels");
			else
				outChannels = destFileLayout.NumberChannels();
		}

		if (!(mParams.flags & kOpt_NoSanitizeOutputFormat)) {
			// adjust the output format's channels; output.channels overrides the channels
			if (outChannels == -1)
				outChannels = srcFormat.mChannelsPerFrame;
			if (outChannels > 0) {
				destFormat.mChannelsPerFrame = outChannels;
				destFormat.mBytesPerPacket *= outChannels;
				destFormat.mBytesPerFrame *= outChannels;
			}
		
			// use AudioFormat API to clean up the output format
			propertySize = sizeof(AudioStreamBasicDescription);
			XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &propertySize, &destFormat),
					"get destination format info");
		}
		OpenOutputFile(srcFormat, destFormat, destFSRef, destFileLayout);
		createdOutputFile = true;
		mDestFormat = destFormat;
		
		// set up client formats -- the PCM formats used for reads/writes on
		// each side of the conversion
		CAStreamBasicDescription srcClientFormat, destClientFormat;
		{
			CAAudioChannelLayout srcClientLayout, destClientLayout;
			
			if (encoding) {
				if (decoding) {
					// transcoding
//					XThrowIf(encoding && decoding, -1, "transcoding not currently supported");
					
					if (srcFormat.mChannelsPerFrame > 2 || destFormat.mChannelsPerFrame > 2)
						CAXException::Warning("Transcoding multichannel audio may not handle channel layouts correctly", 0);
					srcClientFormat.SetCanonical(std::min(srcFormat.mChannelsPerFrame, destFormat.mChannelsPerFrame), true);
					srcClientFormat.mSampleRate = std::max(srcFormat.mSampleRate, destFormat.mSampleRate);
					mSrcFile.SetClientFormat(srcClientFormat, NULL);
					
					destClientFormat = srcClientFormat;
				} else {
					// encoding: feed the encoder the source's PCM format
					srcClientFormat = srcFormat;
					destClientFormat = srcFormat;
				}
				// by here, destClientFormat will have a valid sample rate
				destClientLayout = srcFileLayout.IsValid() ? srcFileLayout : destFileLayout;

				mDestFile.SetClientFormat(destClientFormat, &destClientLayout);
			} else {
				// decoding or PCM->PCM
				if (destFormat.mSampleRate == 0.)
					destFormat.mSampleRate = srcFormat.mSampleRate;
		
				destClientFormat = destFormat;
				srcClientFormat = destFormat;
				srcClientLayout = destFileLayout;
				
				mSrcFile.SetClientFormat(srcClientFormat, &srcClientLayout);
			}
		}
		
		XThrowIf(srcClientFormat.mBytesPerPacket == 0, -1, "source client format not PCM"); 
		XThrowIf(destClientFormat.mBytesPerPacket == 0, -1, "dest client format not PCM"); 		
		if (encoding) {
			// set the bitrate
			if (mParams.output.bitRate != -1) {
				if (mParams.flags & kOpt_Verbose)
					printf("bitrate = %ld\n", mParams.output.bitRate);
				mDestFile.SetConverterProperty(kAudioConverterEncodeBitRate, sizeof(UInt32), &mParams.output.bitRate);
			}

			// set the codec quality
			if (mParams.output.codecQuality != -1) {
				if (mParams.flags & kOpt_Verbose)
					printf("codec quality = %ld\n", mParams.output.codecQuality);
				mDestFile.SetConverterProperty(kAudioConverterCodecQuality, sizeof(UInt32), &mParams.output.codecQuality);
			}

			// set the bitrate strategy -- called bitrate format in the codecs since it had already shipped
			if (mParams.output.strategy != -1) {
				if (mParams.flags & kOpt_Verbose)
					printf("strategy = %ld\n", mParams.output.strategy);
				mDestFile.SetConverterProperty(kAudioCodecBitRateFormat, sizeof(UInt32), &mParams.output.strategy);
			}
		}
		// set the SRC quality -- only meaningful when the sample rate actually changes
		if (mParams.output.srcQuality != -1) {
			if (srcFormat.mSampleRate != 0. && destFormat.mSampleRate != 0. && srcFormat.mSampleRate != destFormat.mSampleRate) {
				if (mParams.flags & kOpt_Verbose)
					printf("SRC quality = %ld\n", mParams.output.srcQuality);
				if (encoding)
					mDestFile.SetConverterProperty(kAudioConverterSampleRateConverterQuality, sizeof(UInt32), &mParams.output.srcQuality);
				else
					mSrcFile.SetConverterProperty(kAudioConverterSampleRateConverterQuality, sizeof(UInt32), &mParams.output.srcQuality);
			}
		}
		if (decoding) {
			if (mParams.output.primeMethod != -1)
				mSrcFile.SetConverterProperty(kAudioConverterPrimeMethod, sizeof(UInt32), &mParams.output.primeMethod);
		}

		PrintFormats(&origSrcFileLayout);

		// prepare I/O buffers
		UInt32 bytesToRead = 0x10000;
		UInt32 framesToRead = bytesToRead;	// OK, ReadPackets will limit as appropriate
		ComputeReadSize(srcFormat, destFormat, bytesToRead, framesToRead);

//		const SInt64 totalFrames = mSrcFile.GetNumberFrames();
//#warning "GetNumberFrames() can be prohibitively slow for some formats"
		
		mReadBuffer = CABufferList::New("readbuf", srcClientFormat);
		mReadBuffer->AllocateBuffers(bytesToRead);
		mReadPtrs = CABufferList::New("readptrs", srcClientFormat);
		
		BeginConversion();
		
		// main pump: read a chunk from the source, write it to the
		// destination, until the source is exhausted or we are told to stop
		while (true) {
			//XThrowIf(Progress(mSrcFile.Tell(), totalFrames), userCanceledErr, "user stopped");
				// this was commented out for awhile -- performance? make it optional?
			UInt32 nFrames = framesToRead;
			mReadPtrs->SetFrom(mReadBuffer);
			AudioBufferList *readbuf = &mReadPtrs->GetModifiableBufferList();
			
			mSrcFile.Read(nFrames, readbuf);
			//printf("read %ld of %ld frames\n", nFrames, framesToRead);
			if (nFrames == 0)
				break;

			mDestFile.Write(nFrames, readbuf);
			if (ShouldTerminateConversion())
				break;
		}
		
		if (decoding) {
			// fix up the destination file's length if necessary and possible
			SInt64 nframes = mSrcFile.GetNumberFrames();
			if (nframes != 0) {
				// only shorten, don't try to lengthen
				nframes = SInt64(ceil(nframes * destFormat.mSampleRate / srcFormat.mSampleRate));
				if (nframes < mDestFile.GetNumberFrames()) {
					mDestFile.SetNumberFrames(nframes);
				}
			}
		}
		EndConversion();
	}
	catch (...) {
		// release buffers, close both files (swallowing secondary errors),
		// remove a partially written output, then rethrow
		delete mReadBuffer;
		delete mReadPtrs;
		delete writeBuffer;
		delete writePtrs;
		if (!createdOutputFile)
			PrintFormats(&origSrcFileLayout);
		try { mSrcFile.Close(); } catch (...) { }
		try { mDestFile.Close(); } catch (...) { }
		if (createdOutputFile)
			unlink(mOutName);
		throw;
	}
	delete mReadBuffer;
	delete mReadPtrs;
	delete writeBuffer;
	delete writePtrs;
	mSrcFile.Close();
	mDestFile.Close();
	if (TaggedEncodingToCAF())
		WriteCAFInfo();
	
	if (mParams.flags & kOpt_Verbose) {
		// must close to flush encoder; GetNumberFrames() not necessarily valid until afterwards but then
		// the file is closed
		CAAudioFile temp;
		FSRef destFSRef;
		if (FSPathMakeRef((UInt8 *)mOutName, &destFSRef, NULL) == noErr) {
			temp.Open(destFSRef);
			printf("Output file: %s, %qd frames\n", basename(mOutName), temp.GetNumberFrames());
		}
	}
}
Example #3 (votes: 0)
File: track.c  Project: one-k/rmov
/*
  call-seq: track_get_audio_channel_count() -> number_of_channels  
*/
static VALUE track_get_audio_channel_count(VALUE obj)
{
  UInt32 channelCount;
  AudioChannelLayout *layout = track_get_audio_channel_layout(obj);

  // no audio channel layout available for this track
  if (layout == NULL) return Qnil;

  if (layout->mChannelLayoutTag == kAudioChannelLayoutTag_UseChannelDescriptions) {
    // the count is carried by the explicit description list
    channelCount = layout->mNumberChannelDescriptions;
  } else {
    // otherwise the layout tag itself encodes the channel count
    channelCount = AudioChannelLayoutTag_GetNumberOfChannels(layout->mChannelLayoutTag);
  }

  // track_get_audio_channel_layout() transfers ownership of the layout
  free(layout);

  return INT2NUM(channelCount);
}
// Interleaves the audio from nInputs PCM files into one multichannel AIFF
// file at 'outfilename'.  All inputs must be linear PCM with the same
// sample rate; the output bit depth is the maximum of the inputs'.
// 'layout', when non-NULL, supplies the output channel layout and must
// match the total channel count (otherwise the process exits with 2).
// On any exception the partial output is unlinked before rethrowing.
void	Interleave(int nInputs, const char *infilenames[], const char *outfilename, const CAAudioChannelLayout *layout)
{
	const UInt32 kBufferSizeFrames = 0x8000;
	const UInt32 kBufferSizeBytes = kBufferSizeFrames * sizeof(Float32);
	// Pairs a CAAudioFile with the buffer memory and buffer-pointer list
	// used to read from (or write to) it; both are owned and freed here.
	class FileAndBuffer : public CAAudioFile {
	public:
		FileAndBuffer() : mBuf(NULL), mPtrs(NULL) { }
		~FileAndBuffer() { delete mBuf; delete mPtrs; }
		
		CABufferList *	mBuf;	// owned backing storage
		CABufferList *	mPtrs;	// pointer list referencing mBuf's buffers
	};
	FileAndBuffer *infiles = new FileAndBuffer[nInputs], *file;
	FileAndBuffer outfile;
	int i;
	UInt32 outputChannels = 0;
	double sampleRate = 0.;
	UInt32 maxBitDepth = 0;
	CAStreamBasicDescription clientFormat;
	bool outFileCreated = false;
	
	try {
		// set up input files: validate format, accumulate channel count,
		// and give each file a deinterleaved float client format + buffers
		for (i = 0; i < nInputs; ++i) {
			file = &infiles[i];
			file->Open(infilenames[i]);
			const CAStreamBasicDescription &fmt = file->GetFileDataFormat();
			//fmt.PrintFormat(stdout, "", "input file");
			XThrowIf(fmt.mFormatID != kAudioFormatLinearPCM, -1, "input files must be PCM");
			outputChannels += fmt.mChannelsPerFrame;
			// first file establishes the sample rate; the rest must match
			if (sampleRate == 0.)
				sampleRate = fmt.mSampleRate;
			else
				XThrowIf(fmt.mSampleRate != sampleRate, -1, "input files must have the same sample rate");
			if (fmt.mBitsPerChannel > maxBitDepth)
				maxBitDepth = fmt.mBitsPerChannel;
			clientFormat.mSampleRate = sampleRate;
			clientFormat.SetCanonical(fmt.mChannelsPerFrame, false);	// deinterleaved
			file->SetClientFormat(clientFormat, NULL);
			file->mBuf = CABufferList::New("readbuf", clientFormat);
			file->mBuf->AllocateBuffers(kBufferSizeBytes);
			file->mPtrs = CABufferList::New("readptrs", clientFormat);
			//clientFormat.PrintFormat(stdout, "", "input client");
		}
		
		// a caller-supplied layout must account for every output channel
		if (layout != NULL) {
			if (AudioChannelLayoutTag_GetNumberOfChannels(layout->Tag()) != outputChannels) {
				fprintf(stderr, "Channel layout tag '%s' is inappropriate for %u channels of audio -- aborting\n", 
					CAChannelLayouts::ConstantToString(layout->Tag()), (unsigned)outputChannels);
				exit(2);
			}
		}

		// prepare output file format: big-endian packed signed integer PCM
		// at the widest input bit depth, all channels interleaved
		CAStreamBasicDescription outfmt;
		outfmt.mSampleRate = sampleRate;
		outfmt.mFormatID = kAudioFormatLinearPCM;
		outfmt.mFormatFlags = kLinearPCMFormatFlagIsBigEndian | kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
		outfmt.mBitsPerChannel = maxBitDepth;
		outfmt.mChannelsPerFrame = outputChannels;
		outfmt.mBytesPerPacket = outfmt.mBytesPerFrame = outputChannels * (maxBitDepth >> 3);
		outfmt.mFramesPerPacket = 1;
		//outfmt.PrintFormat(stdout, "", "output file");
		
		// replace any existing file at the destination path
		unlink(outfilename);
		FSRef parentDir;
		CFStringRef outName;
		XThrowIfError(PosixPathToParentFSRefAndName(outfilename, parentDir, outName), "Couldn't locate output directory");
		outfile.CreateNew(parentDir, outName, kAudioFileAIFFType, outfmt, layout ? &layout->Layout() : NULL);
		outFileCreated = true;
		
		// create the output file and buffers (deinterleaved client format;
		// the file does the final interleave)
		clientFormat.mSampleRate = sampleRate;
		clientFormat.SetCanonical(outputChannels, false);
		outfile.SetClientFormat(clientFormat, NULL);
		//clientFormat.PrintFormat(stdout, "", "output client");

		// no backing mBuf here: the write pointer list is spliced to point
		// directly at the input files' read buffers below
		outfile.mPtrs = CABufferList::New("writeptrs", clientFormat);

		AudioBufferList &writebufs = outfile.mPtrs->GetModifiableBufferList();
		
		// main loop: read one chunk from every input, aim the output
		// buffer pointers at those reads, and write them as one chunk
		while (true) {
			UInt32 maxFramesRead = 0;
			UInt32 nframes;
			int outbuf = 0;	// next slot in writebufs to splice into
			for (i = 0; i < nInputs; ++i) {
				file = &infiles[i];
				file->mPtrs->SetFrom(file->mBuf);
				nframes = kBufferSizeFrames;
				AudioBufferList &readbufs = file->mPtrs->GetModifiableBufferList();
				file->Read(nframes, &readbufs);
				//CAShowAudioBufferList(&readbufs, 8, 0);
				if (nframes > maxFramesRead)
					maxFramesRead = nframes;
				// a short read (file ending early) is zero-padded so all
				// inputs contribute the same number of frames this pass
				if (nframes < kBufferSizeFrames)
					file->mPtrs->PadWithZeroes(kBufferSizeBytes);

				// splice this file's (deinterleaved) buffers into the
				// output pointer list -- copies AudioBuffer structs, not
				// sample data
				memcpy(&writebufs.mBuffers[outbuf], &readbufs.mBuffers[0], 
					readbufs.mNumberBuffers * sizeof(AudioBuffer));
				outbuf += readbufs.mNumberBuffers;
			}
			if (maxFramesRead == 0)
				break;

			// final, shorter chunk: shrink the advertised buffer sizes
			// (per-buffer bytes == frames * sizeof(Float32) since the
			// client format is deinterleaved float)
			if (maxFramesRead < kBufferSizeFrames)
				outfile.mPtrs->SetNumBytes(maxFramesRead * sizeof(Float32));
			//CAShowAudioBufferList(&writebufs, 8, 0);
			outfile.Write(maxFramesRead, &writebufs);
			if (maxFramesRead < kBufferSizeFrames)
				break;
		}
	}
	catch (...) {
		if (outFileCreated)
			unlink(outfilename);
		delete[] infiles;
		throw;
	}
	outfile.Close();
	// input files are closed from destructors
	delete[] infiles;
}