Example #1
// set up conversion from the Squeak frame format to the device frame format, or vice-versa.
// requires: stereo for output, stereo or mono for input.
//
static int Stream_setFormat(Stream *s, int frameCount, int sampleRate, int stereo)
{
  int nChannels=	1 + stereo;
  AudioStreamBasicDescription imgFmt, devFmt;
  UInt32 sz= sizeof(devFmt);

  if (0 == s->direction) nChannels= 2;	// insist

  if (checkError(AudioDeviceGetProperty(s->id, 0, s->direction,
					kAudioDevicePropertyStreamFormat,
					&sz, &devFmt),
		 "GetProperty", "StreamFormat"))
    return 0;

  debugf("stream %p[%d] device format:\n", s, s->direction);  dumpFormat(&devFmt);

  imgFmt.mSampleRate	   = sampleRate;
  imgFmt.mFormatID	   = kAudioFormatLinearPCM;
#if defined(WORDS_BIGENDIAN)
  imgFmt.mFormatFlags	   = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsBigEndian;
#else
  imgFmt.mFormatFlags	   = kLinearPCMFormatFlagIsSignedInteger;
#endif
  imgFmt.mBytesPerPacket   = SqueakFrameSize / (3 - nChannels);
  imgFmt.mFramesPerPacket  = 1;
  imgFmt.mBytesPerFrame    = SqueakFrameSize / (3 - nChannels);
  imgFmt.mChannelsPerFrame = nChannels;
  imgFmt.mBitsPerChannel   = 16;

  debugf("stream %p[%d] image format:\n", s, s->direction);  dumpFormat(&imgFmt);

  if (s->direction) // input
    {
      if (checkError(AudioConverterNew(&devFmt, &imgFmt, &s->converter), "AudioConverter", "New"))
	return 0;
      sz= sizeof(s->cvtBufSize);
      s->cvtBufSize= 512 * devFmt.mBytesPerFrame;
      if (checkError(AudioConverterGetProperty(s->converter, kAudioConverterPropertyCalculateOutputBufferSize,
					       &sz, &s->cvtBufSize), 
		     "GetProperty", "OutputBufferSize"))
	return 0;
    }
  else // output
    {
      if (checkError(AudioConverterNew(&imgFmt, &devFmt, &s->converter), "AudioConverter", "New"))
	return 0;
    }

  s->channels=   nChannels;
  s->sampleRate= sampleRate;
  s->imgBufSize= SqueakFrameSize * nChannels * frameCount;

  frameCount= max(frameCount, 512 * sampleRate / devFmt.mSampleRate);

  s->buffer= Buffer_new((s->direction ? DeviceFrameSize : SqueakFrameSize) * nChannels * frameCount * 2);

  debugf("stream %p[%d] sound buffer size %d/%d (%d)\n", s, s->direction, s->imgBufSize, s->buffer->size, frameCount);

  return 1;
}
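checkError() and dumpFormat() are Squeak-local helpers the snippet assumes but does not show. A minimal sketch of what a dumpFormat() along these lines could look like (the ASBD field names are real; the exact formatting is an assumption):

/* sketch only: prints every field of a CoreAudio stream description;
   requires <stdio.h> and the CoreAudio headers */
static void dumpFormat(const AudioStreamBasicDescription *fmt)
{
  printf("  mSampleRate        %g\n",  fmt->mSampleRate);
  printf("  mFormatID          '%c%c%c%c'\n",
         (char)(fmt->mFormatID >> 24), (char)(fmt->mFormatID >> 16),
         (char)(fmt->mFormatID >> 8),  (char)fmt->mFormatID);
  printf("  mFormatFlags       0x%lx\n", (unsigned long)fmt->mFormatFlags);
  printf("  mBytesPerPacket    %lu\n",   (unsigned long)fmt->mBytesPerPacket);
  printf("  mFramesPerPacket   %lu\n",   (unsigned long)fmt->mFramesPerPacket);
  printf("  mBytesPerFrame     %lu\n",   (unsigned long)fmt->mBytesPerFrame);
  printf("  mChannelsPerFrame  %lu\n",   (unsigned long)fmt->mChannelsPerFrame);
  printf("  mBitsPerChannel    %lu\n",   (unsigned long)fmt->mBitsPerChannel);
}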
Example #2
AudioConverterX::AudioConverterX(const AudioStreamBasicDescription &iasbd,
                                 const AudioStreamBasicDescription &oasbd)
{
    AudioConverterRef converter;
    CHECKCA(AudioConverterNew(&iasbd, &oasbd, &converter));
    attach(converter, true);
}
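CHECKCA is a guard macro belonging to this wrapper and is not shown. A minimal sketch of such a guard, assuming the wrapper reports failures via C++ exceptions (the real macro may differ):

#include <stdexcept>
#include <string>

// sketch only: evaluate a CoreAudio call and throw on any non-noErr status
#define CHECKCA(expr) \
    do { \
        OSStatus err_ = (expr); \
        if (err_ != noErr) \
            throw std::runtime_error(std::string(#expr) + " failed: " + \
                                     std::to_string(static_cast<int>(err_))); \
    } while (0)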
Example #3
void
AppleATDecoder::SetupDecoder()
{
  AudioStreamBasicDescription inputFormat, outputFormat;
  // Fill in the input format description from the stream.
  AppleUtils::GetProperty(mStream,
      kAudioFileStreamProperty_DataFormat, &inputFormat);

  // Fill in the output format manually.
  PodZero(&outputFormat);
  outputFormat.mFormatID = kAudioFormatLinearPCM;
  outputFormat.mSampleRate = inputFormat.mSampleRate;
  outputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
#if defined(MOZ_SAMPLE_TYPE_FLOAT32)
  outputFormat.mBitsPerChannel = 32;
  outputFormat.mFormatFlags =
    kLinearPCMFormatFlagIsFloat |
    0;
#else
# error Unknown audio sample type
#endif
  // Set up the decoder so it gives us one sample per frame
  outputFormat.mFramesPerPacket = 1;
  outputFormat.mBytesPerPacket = outputFormat.mBytesPerFrame
        = outputFormat.mChannelsPerFrame * outputFormat.mBitsPerChannel / 8;

  OSStatus rv = AudioConverterNew(&inputFormat, &outputFormat, &mConverter);
  if (rv) {
    LOG("Error %d constructing AudioConverter", rv);
    mConverter = nullptr;
    mCallback->Error();
  }
  mHaveOutput = false;
}
Example #4
static GVBool gviHardwareInitCapture(GVIDevice * device)
{
	GVIHardwareData * data = (GVIHardwareData *)device->m_data;
	UInt32 size;
	OSStatus result;
	GVICapturedFrame * frame;
	int numCaptureBufferBytes;
	int numCaptureBufferFrames;
	int i;

	// get the capture format
	size = sizeof(AudioStreamBasicDescription);
	result = AudioDeviceGetProperty(device->m_deviceID, 0, true, kAudioDevicePropertyStreamFormat, &size, &data->m_captureStreamDescriptor);
	if(result != noErr)
		return GVFalse;

	// create a converter from the capture format to the GV format
	result = AudioConverterNew(&data->m_captureStreamDescriptor, &GVIVoiceFormat, &data->m_captureConverter);
	if(result != noErr)
		return GVFalse;

	// allocate a capture buffer
	data->m_captureBuffer = (GVSample *)gsimalloc(GVIBytesPerFrame);
	if(!data->m_captureBuffer)
	{
		AudioConverterDispose(data->m_captureConverter);
		return GVFalse;		
	}

	// allocate space for holding captured frames
	numCaptureBufferBytes = gviMultiplyByBytesPerMillisecond(GVI_CAPTURE_BUFFER_MILLISECONDS);
	numCaptureBufferBytes = gviRoundUpToNearestMultiple(numCaptureBufferBytes, GVIBytesPerFrame);
	numCaptureBufferFrames = (numCaptureBufferBytes / GVIBytesPerFrame);
	for(i = 0 ; i < numCaptureBufferFrames ; i++)
	{
		frame = (GVICapturedFrame *)gsimalloc(sizeof(GVICapturedFrame) + GVIBytesPerFrame - sizeof(GVSample));
		if(!frame)
		{
			gviFreeCapturedFrames(&data->m_captureAvailableFrames);
			gsifree(data->m_captureBuffer);
			AudioConverterDispose(data->m_captureConverter);
			return GVFalse;
		}
		gviPushFirstFrame(&data->m_captureAvailableFrames, frame);
	}

	// init the last crossed time
	data->m_captureLastCrossedThresholdTime = (data->m_captureClock - GVI_HOLD_THRESHOLD_FRAMES - 1);

	// add property listener
	AudioDeviceAddPropertyListener(device->m_deviceID, 0, true, kAudioDevicePropertyDeviceIsAlive, gviPropertyListener, device);

#if GVI_VOLUME_IN_SOFTWARE
	// init volume
	data->m_captureVolume = (GVScalar)1.0;
#endif

	return GVTrue;
}
Example #5
static void ca_start_w(CAData *d){
	OSStatus err= noErr;

	if (d->write_started==FALSE){
		AudioStreamBasicDescription inASBD;
		int i;
		
		i = ca_open_w(d);
		if (i<0)
			return;

		inASBD = d->caOutASBD;
		inASBD.mSampleRate = d->rate;
		inASBD.mFormatID = kAudioFormatLinearPCM;
		inASBD.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
		if (htonl(0x1234) == 0x1234)
		  inASBD.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
		inASBD.mChannelsPerFrame = d->stereo ? 2 : 1;
		inASBD.mBytesPerPacket = (d->bits / 8) * inASBD.mChannelsPerFrame;
		inASBD.mBytesPerFrame = (d->bits / 8) * inASBD.mChannelsPerFrame;
		inASBD.mFramesPerPacket = 1;
		inASBD.mBitsPerChannel = d->bits;


		err = AudioConverterNew( &inASBD, &d->caOutASBD, &d->caOutConverter);
		if(err != noErr)
			ms_error("AudioConverterNew %x %d", err, inASBD.mBytesPerFrame);
		else
			CAShow(d->caOutConverter);

		if (inASBD.mChannelsPerFrame == 1 && d->caOutASBD.mChannelsPerFrame == 2)
		{
			if (d->caOutConverter)
			{
				// This should be as large as the number of output channels,
				// each element specifies which input channel's data is routed to that output channel
				SInt32 channelMap[] = { 0, 0 };
				err = AudioConverterSetProperty(d->caOutConverter, kAudioConverterChannelMap, 2*sizeof(SInt32), channelMap);
			}
		}

		memset((char*)&d->caOutRenderCallback, 0, sizeof(AURenderCallbackStruct));
		d->caOutRenderCallback.inputProc = writeRenderProc;
		d->caOutRenderCallback.inputProcRefCon = d;
		err = AudioUnitSetProperty (d->caOutAudioUnit, 
                            kAudioUnitProperty_SetRenderCallback, 
                            kAudioUnitScope_Input, 
                            0,
                            &d->caOutRenderCallback, 
                            sizeof(AURenderCallbackStruct));
		if(err != noErr)
			ms_error("AudioUnitSetProperty %x", err);

		if(err == noErr) {
			if(AudioOutputUnitStart(d->caOutAudioUnit) == noErr)
				d->write_started=TRUE;
		}
	}
}
Example #6
	int initoutput(){
		AudioComponentDescription desc;  
		AudioComponent comp;
		OSStatus err;
		UInt32 size;
		Boolean canwrite;
		
		AudioStreamBasicDescription 	inputdesc,outputdesc;

		desc.componentType=kAudioUnitType_Output;
		desc.componentSubType=kAudioUnitSubType_DefaultOutput;
		desc.componentManufacturer=kAudioUnitManufacturer_Apple;
		desc.componentFlags=0;
		desc.componentFlagsMask=0;

		comp=AudioComponentFindNext(NULL,&desc);
		if (comp==NULL) return -1;

		err= AudioComponentInstanceNew(comp,&out);
		if (err) return err;				

		err=AudioUnitInitialize(out);if (err) return err;
		
		err=AudioUnitGetPropertyInfo(out,kAudioUnitProperty_StreamFormat,kAudioUnitScope_Output,0,&size,&canwrite);
		if (err) return err;

		err=AudioUnitGetProperty(out,kAudioUnitProperty_StreamFormat,kAudioUnitScope_Input,0,&outputdesc,&size);
		if (err) return err;		
		
//		dumpdesc(&outputdesc);
		
		inputdesc.mSampleRate=44100.0;
		inputdesc.mFormatID='lpcm';
#if __BIG_ENDIAN__
		inputdesc.mFormatFlags=0x0e;
#else
		inputdesc.mFormatFlags=0x0c;
#endif
		inputdesc.mBytesPerPacket=4;
		inputdesc.mFramesPerPacket=1;
		inputdesc.mBytesPerFrame=4;
		inputdesc.mChannelsPerFrame=2;
		inputdesc.mBitsPerChannel=16;
		inputdesc.mReserved=0;

//		dumpdesc(&inputdesc);
		
		err=AudioConverterNew(&inputdesc,&outputdesc,&conv);
		if (err) {
//			printf("AudioConvertNew failed %.*s\n",4,(char*)&err);
			return err;
		}

		return err;
	}
Example #7
bool SFB::Audio::Converter::Open(CFErrorRef *error)
{
	if(!mDecoder)
		return false;

	// Open the decoder if necessary
	if(!mDecoder->IsOpen() && !mDecoder->Open(error)) {
		if(error)
			LOGGER_ERR("org.sbooth.AudioEngine.AudioConverter", "Error opening decoder: " << error);

		return false;
	}

	AudioStreamBasicDescription inputFormat = mDecoder->GetFormat();
	OSStatus result = AudioConverterNew(&inputFormat, &mFormat, &mConverter);
	if(noErr != result) {
		LOGGER_ERR("org.sbooth.AudioEngine.AudioConverter", "AudioConverterNewfailed: " << result << "'" << SFB::StringForOSType((OSType)result) << "'");

		if(error)
			*error = CFErrorCreate(kCFAllocatorDefault, kCFErrorDomainOSStatus, result, nullptr);

		return false;
	}

	// TODO: Set kAudioConverterPropertyCalculateInputBufferSize

	mConverterState = std::unique_ptr<ConverterStateData>(new ConverterStateData(*mDecoder));
	mConverterState->AllocateBufferList(BUFFER_SIZE_FRAMES);

	// Create the channel map
	if(mChannelLayout) {
		SInt32 channelMap [ mFormat.mChannelsPerFrame ];
		UInt32 dataSize = (UInt32)sizeof(channelMap);

		const AudioChannelLayout *channelLayouts [] = {
			mDecoder->GetChannelLayout(),
			mChannelLayout
		};

		result = AudioFormatGetProperty(kAudioFormatProperty_ChannelMap, sizeof(channelLayouts), channelLayouts, &dataSize, channelMap);
		if(noErr != result) {
			LOGGER_ERR("org.sbooth.AudioEngine.AudioConverter", "AudioFormatGetProperty (kAudioFormatProperty_ChannelMap) failed: " << result);

			if(error)
				*error = CFErrorCreate(kCFAllocatorDefault, kCFErrorDomainOSStatus, result, nullptr);

			return false;
		}
	}

	mIsOpen = true;
	return true;
}
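Note that the excerpt computes channelMap via kAudioFormatProperty_ChannelMap but never hands it to the converter within the shown code. The step that would normally follow, inside the same mChannelLayout block (a sketch; the full SFBAudioEngine source may do this right after the AudioFormatGetProperty call):

		// sketch: apply the computed input-to-output channel routing to the converter
		result = AudioConverterSetProperty(mConverter, kAudioConverterChannelMap, dataSize, channelMap);
		if(noErr != result) {
			LOGGER_ERR("org.sbooth.AudioEngine.AudioConverter", "AudioConverterSetProperty (kAudioConverterChannelMap) failed: " << result);
			return false;
		}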
Example #8
nsresult
AppleATDecoder::SetupDecoder(mp4_demuxer::MP4Sample* aSample)
{
  if (mFormatID == kAudioFormatMPEG4AAC &&
      mConfig.extended_profile == 2) {
    // Check for implicit SBR signalling if stream is AAC-LC
    // This will provide us with an updated magic cookie for use with
    // GetInputAudioDescription.
    if (NS_SUCCEEDED(GetImplicitAACMagicCookie(aSample)) &&
        !mMagicCookie.Length()) {
      // nothing found yet, will try again later
      return NS_ERROR_NOT_INITIALIZED;
    }
    // An error occurred, fallback to using default stream description
  }

  LOG("Initializing Apple AudioToolbox decoder");

  AudioStreamBasicDescription inputFormat;
  PodZero(&inputFormat);
  nsresult rv =
    GetInputAudioDescription(inputFormat,
                             mMagicCookie.Length() ?
                                 mMagicCookie : *mConfig.extra_data);
  if (NS_FAILED(rv)) {
    return rv;
  }
  // Fill in the output format manually.
  PodZero(&mOutputFormat);
  mOutputFormat.mFormatID = kAudioFormatLinearPCM;
  mOutputFormat.mSampleRate = inputFormat.mSampleRate;
  mOutputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
#if defined(MOZ_SAMPLE_TYPE_FLOAT32)
  mOutputFormat.mBitsPerChannel = 32;
  mOutputFormat.mFormatFlags =
    kLinearPCMFormatFlagIsFloat |
    0;
#else
# error Unknown audio sample type
#endif
  // Set up the decoder so it gives us one sample per frame
  mOutputFormat.mFramesPerPacket = 1;
  mOutputFormat.mBytesPerPacket = mOutputFormat.mBytesPerFrame
        = mOutputFormat.mChannelsPerFrame * mOutputFormat.mBitsPerChannel / 8;

  OSStatus status = AudioConverterNew(&inputFormat, &mOutputFormat, &mConverter);
  if (status) {
    LOG("Error %d constructing AudioConverter", status);
    mConverter = nullptr;
    return NS_ERROR_FAILURE;
  }
  return NS_OK;
}
Example #9
OALCaptureDevice::OALCaptureDevice (const char* inDeviceName, uintptr_t inSelfToken, UInt32 inSampleRate, UInt32 inFormat, UInt32 inBufferSize)
	:
#if LOG_CAPTUREDEVICE_VERBOSE
        mSelfToken (inSelfToken),
#endif
		mCurrentError(ALC_NO_ERROR),
		mCaptureOn(false),
		mStoreSampleTime(0),
		mFetchSampleTime(0),
		mInputUnit(0),
		mRingBuffer(NULL),
		mBufferData(NULL),
		mSampleRateRatio(1.0),
		mRequestedRingFrames(inBufferSize),
		mAudioInputPtrs(NULL),
		mInUseFlag(0)
{
    char        *useThisDevice = (char *) inDeviceName;
	
	try {
		// translate the sample rate and data format parameters into an ASBD - this method throws
		FillInASBD(mRequestedFormat, inFormat, inSampleRate);

		// inBufferSize must be at least as big as one packet of the requested output format
		if (inBufferSize < mRequestedFormat.mBytesPerPacket)
           throw ((OSStatus) AL_INVALID_VALUE);
					
		// until the ALC_ENUMERATION_EXT extension is supported only use the default input device
        useThisDevice = NULL;
		
		InitializeAU(useThisDevice);

		if(mRequestedFormat.mSampleRate != mNativeFormat.mSampleRate)
		{
#if LOG_CAPTURE
	DebugMessageN2("OALCaptureDevice::OALCaptureDevice - Hardware Sample Rate: %5.1f Requested Sample Rate: %5.1f", mNativeFormat.mSampleRate, mRequestedFormat.mSampleRate);
#endif
			mSampleRateRatio = mNativeFormat.mSampleRate / mRequestedFormat.mSampleRate;
			mBufferData = (UInt8*)malloc(mRequestedFormat.mBytesPerFrame*mRequestedRingFrames*mSampleRateRatio);
			OSStatus result = AudioConverterNew(&mOutputFormat, &mRequestedFormat, &mAudioConverter);
				THROW_RESULT
		}
		
		mAudioInputPtrs = CABufferList::New("WriteBufferList", mRequestedFormat);
		
		mRingBuffer =  new OALRingBuffer();
		mRingBuffer->Allocate(mRequestedFormat.mBytesPerFrame, mRequestedRingFrames*mSampleRateRatio);
				
	}
Example #10
CocoaCaConverter::CocoaCaConverter( Loader * aLoader, LoaderFunction loaderFn, const AudioStreamBasicDescription& sourceDescription, const AudioStreamBasicDescription& targetDescription, uint32_t maxPacketSize )
	: mLoader( aLoader ), mCurrentPacketDescriptions( NULL ), mLoaderFunction( loaderFn ), mMaxPacketSize( maxPacketSize )
{	
	mConverterBuffer.mNumberBuffers = 0;
	mConverterBuffer.mBuffers = NULL;
	
	OSStatus err = noErr;
	err = AudioConverterNew( &sourceDescription, &targetDescription, &mConverter );
	if( err ) {
		throw IoExceptionUnsupportedDataFormat();
		/*switch(err) {
			case kAudioConverterErr_FormatNotSupported:
				std::cout << "kAudioConverterErr_FormatNotSupported" << std::endl;
			break;
			case kAudioConverterErr_OperationNotSupported:
				std::cout << "kAudioConverterErr_OperationNotSupported" << std::endl;
			break;
			case kAudioConverterErr_PropertyNotSupported:
				std::cout << "kAudioConverterErr_PropertyNotSupported" << std::endl;
			break;
			case kAudioConverterErr_InvalidInputSize:
				std::cout << "kAudioConverterErr_InvalidInputSize" << std::endl;
			break;
			case kAudioConverterErr_InvalidOutputSize:
				std::cout << "kAudioConverterErr_InvalidOutputSize" << std::endl;
			break;
			case kAudioConverterErr_UnspecifiedError:
				std::cout << "kAudioConverterErr_UnspecifiedError" << std::endl;
			break;
			case kAudioConverterErr_BadPropertySizeError:
				std::cout << "kAudioConverterErr_BadPropertySizeError" << std::endl;
			break;
			case kAudioConverterErr_RequiresPacketDescriptionsError:
				std::cout << "kAudioConverterErr_RequiresPacketDescriptionsError" << std::endl;
			break;
			case kAudioConverterErr_InputSampleRateOutOfRange:
				std::cout << "kAudioConverterErr_InputSampleRateOutOfRange" << std::endl;
			break;
			case kAudioConverterErr_OutputSampleRateOutOfRange:
				std::cout << "kAudioConverterErr_OutputSampleRateOutOfRange" << std::endl;
			break;
		}*/
	}
}
Example #11
OSStatus MT32Synth::Initialize() {
    
    destFormat = GetStreamFormat(kAudioUnitScope_Output, 0);
    
    AudioStreamBasicDescription sourceDescription;
    sourceDescription.mSampleRate = 32000;
    sourceDescription.mBytesPerFrame = 4;
    sourceDescription.mBitsPerChannel = 16;
    sourceDescription.mFormatID = kAudioFormatLinearPCM;
    sourceDescription.mBytesPerPacket = 4;
    sourceDescription.mChannelsPerFrame = 2;
    sourceDescription.mFormatFlags = kAudioFormatFlagIsSignedInteger;
    sourceDescription.mFramesPerPacket = 1;
    sourceDescription.mReserved = 0;
    
    AudioConverterNew(&sourceDescription, &destFormat, &audioConverterRef);
    MT32Emu::FileStream controlROMFile;
    MT32Emu::FileStream pcmROMFile;
    
    if(!controlROMFile.open("/Library/MT32/MT32_CONTROL.ROM")) {
        printf("Error opening MT32_CONTROL.ROM\n");
    }
    
    if(!pcmROMFile.open("/Library/MT32/MT32_PCM.ROM")) {
        printf("Error opening MT32_PCM.ROM\n");
    }
    
    romImage = MT32Emu::ROMImage::makeROMImage(&controlROMFile);
    pcmRomImage = MT32Emu::ROMImage::makeROMImage(&pcmROMFile);
    
    synth = new MT32Emu::Synth();
    synth->open(*romImage, *pcmRomImage);
    
    MT32Emu::ROMImage::freeROMImage(romImage);
    MT32Emu::ROMImage::freeROMImage(pcmRomImage);
    
    sendMIDI(0xC1, 0x00, 0x00, 0x00);
    MusicDeviceBase::Initialize();
    
    synth->setOutputGain(2.0);
    
    return noErr;
}
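Initialize() above discards the status from AudioConverterNew. A defensive variant (a sketch; MT32Synth's real error handling, if any, is not shown) would bail out before touching the converter:

    // sketch: check the converter creation status instead of ignoring it
    OSStatus status = AudioConverterNew(&sourceDescription, &destFormat, &audioConverterRef);
    if (status != noErr) {
        printf("AudioConverterNew failed: %d\n", (int)status);
        audioConverterRef = NULL;
        return status;
    }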
Example #12
void
AppleMP3Reader::SetupDecoder()
{
  // Get input format description from demuxer
  AudioStreamBasicDescription inputFormat, outputFormat;
  GetProperty(mAudioFileStream, kAudioFileStreamProperty_DataFormat, &inputFormat);

  memset(&outputFormat, 0, sizeof(outputFormat));

  // Set output format
#if defined(MOZ_SAMPLE_TYPE_FLOAT32)
  outputFormat.mBitsPerChannel = 32;
  outputFormat.mFormatFlags =
    kLinearPCMFormatFlagIsFloat |
    0;
#else
#error Unknown audio sample type
#endif

  mAudioSampleRate = outputFormat.mSampleRate = inputFormat.mSampleRate;
  mAudioChannels
    = outputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
  mAudioFramesPerCompressedPacket = inputFormat.mFramesPerPacket;

  outputFormat.mFormatID = kAudioFormatLinearPCM;

  // Set up the decoder so it gives us one sample per frame; this way, it will
  // pass us all the samples it has in one go. Also makes it much easier to
  // deinterlace.
  outputFormat.mFramesPerPacket = 1;
  outputFormat.mBytesPerPacket = outputFormat.mBytesPerFrame
    = outputFormat.mChannelsPerFrame * outputFormat.mBitsPerChannel / 8;

  OSStatus rv = AudioConverterNew(&inputFormat,
                                  &outputFormat,
                                  &mAudioConverter);

  if (rv) {
    LOGE("Error constructing audio format converter: %x\n", rv);
    mAudioConverter = nullptr;
    return;
  }
}
Example #13
static void ca_start_r(CAData *d){
	OSStatus err= noErr;
	
	if (d->read_started==FALSE){
		AudioStreamBasicDescription outASBD;
		int i;
		
		i = ca_open_r(d);
		if (i<0)
			return;
		
		outASBD = d->caInASBD;
		outASBD.mSampleRate = d->rate;
		outASBD.mFormatID = kAudioFormatLinearPCM;
		outASBD.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
		if (htonl(0x1234) == 0x1234)
		  outASBD.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
		outASBD.mChannelsPerFrame = d->stereo ? 2 : 1;
		outASBD.mBytesPerPacket = (d->bits / 8) * outASBD.mChannelsPerFrame;
		outASBD.mBytesPerFrame = (d->bits / 8) * outASBD.mChannelsPerFrame;
		outASBD.mFramesPerPacket = 1;
		outASBD.mBitsPerChannel = d->bits;

		err = AudioConverterNew( &d->caInASBD, &outASBD, &d->caInConverter);
		if(err != noErr)
			ms_error("AudioConverterNew %x %d", err, outASBD.mBytesPerFrame);
		else
			CAShow(d->caInConverter);

		d->caInRenderCallback.inputProc = readRenderProc;
		d->caInRenderCallback.inputProcRefCon = d;
		err = AudioUnitSetProperty(d->caInAudioUnit,
						kAudioOutputUnitProperty_SetInputCallback,
						kAudioUnitScope_Global,
						0,
						&d->caInRenderCallback,
						sizeof(AURenderCallbackStruct));

		if(AudioOutputUnitStart(d->caInAudioUnit) == noErr)
			d->read_started = TRUE;
	}
}
Example #14
void
AppleATDecoder::SetupDecoder()
{
    AudioStreamBasicDescription inputFormat;

    mHaveOutput = false;

    nsresult rv = AppleUtils::GetRichestDecodableFormat(mStream, inputFormat);
    if (NS_FAILED(rv)) {
        mCallback->Error();
        return;
    }

    // Fill in the output format manually.
    PodZero(&mOutputFormat);
    mOutputFormat.mFormatID = kAudioFormatLinearPCM;
    mOutputFormat.mSampleRate = inputFormat.mSampleRate;
    mOutputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
#if defined(MOZ_SAMPLE_TYPE_FLOAT32)
    mOutputFormat.mBitsPerChannel = 32;
    mOutputFormat.mFormatFlags =
        kLinearPCMFormatFlagIsFloat |
        0;
#else
# error Unknown audio sample type
#endif
    // Set up the decoder so it gives us one sample per frame
    mOutputFormat.mFramesPerPacket = 1;
    mOutputFormat.mBytesPerPacket = mOutputFormat.mBytesPerFrame
                                    = mOutputFormat.mChannelsPerFrame * mOutputFormat.mBitsPerChannel / 8;

    OSStatus status =
        AudioConverterNew(&inputFormat, &mOutputFormat, &mConverter);
    if (status) {
        LOG("Error %d constructing AudioConverter", rv);
        mConverter = nullptr;
        mCallback->Error();
    }
}
Example #15
const OSStatus AudioFile::setClientFormat(const AudioStreamBasicDescription clientASBD)
{
  checkError(AudioConverterNew(&mInputFormat, &clientASBD, &mAudioConverterRef), "AudioConverterNew");
  mClientFormat = clientASBD;
  
  UInt32 size = sizeof(UInt32);
  UInt32 maxPacketSize;
  checkError(AudioFileGetProperty(mAudioFileID, kAudioFilePropertyPacketSizeUpperBound, &size, &maxPacketSize), "AudioFileGetProperty");
  
  if (mIsVBR) {
    mPacketDescs = (AudioStreamPacketDescription*)calloc(mNumPacketsToRead, sizeof(AudioStreamPacketDescription));
  }
  
  // set magic cookie to the AudioConverter
  {
    UInt32 cookieSize;
    OSStatus err = AudioFileGetPropertyInfo(mAudioFileID, 
                                            kAudioFilePropertyMagicCookieData, 
                                            &cookieSize, 
                                            NULL);
    
    if (err == noErr && cookieSize > 0){
      char *magicCookie = (char*)malloc(sizeof(UInt8) * cookieSize);
      UInt32	magicCookieSize = cookieSize;
      AudioFileGetProperty(mAudioFileID,
                           kAudioFilePropertyMagicCookieData,
                           &magicCookieSize,
                           magicCookie);
      
      AudioConverterSetProperty(mAudioConverterRef,
                                kAudioConverterDecompressionMagicCookie,
                                magicCookieSize,
                                magicCookie);
      free(magicCookie);
    }
  }
  
  return noErr;
}
Example #16
/* This is called by audio file stream parser when it finds property values */
void Audio_Stream::propertyValueCallback(void *inClientData, AudioFileStreamID inAudioFileStream, AudioFileStreamPropertyID inPropertyID, UInt32 *ioFlags)
{
    AS_TRACE("%s\n", __PRETTY_FUNCTION__);
    Audio_Stream *THIS = static_cast<Audio_Stream*>(inClientData);
    
    if (!THIS->m_audioStreamParserRunning) {
        AS_TRACE("%s: stray callback detected!\n", __PRETTY_FUNCTION__);
        return;
    }
    
    switch (inPropertyID) {
        case kAudioFileStreamProperty_DataOffset: {
            SInt64 offset;
            UInt32 offsetSize = sizeof(offset);
            OSStatus result = AudioFileStreamGetProperty(inAudioFileStream, kAudioFileStreamProperty_DataOffset, &offsetSize, &offset);
            if (result == 0) {
                THIS->m_dataOffset = offset;
            } else {
                AS_TRACE("%s: reading kAudioFileStreamProperty_DataOffset property failed\n", __PRETTY_FUNCTION__);
            }
            
            break;
        }
        case kAudioFileStreamProperty_AudioDataByteCount: {
            UInt32 byteCountSize = sizeof(UInt64);
            OSStatus err = AudioFileStreamGetProperty(inAudioFileStream,
                                                      kAudioFileStreamProperty_AudioDataByteCount,
                                                      &byteCountSize, &THIS->m_audioDataByteCount);
            if (err) {
                THIS->m_audioDataByteCount = 0;
            }
            break;
        }
        case kAudioFileStreamProperty_ReadyToProducePackets: {
            memset(&(THIS->m_srcFormat), 0, sizeof m_srcFormat);
            UInt32 asbdSize = sizeof(m_srcFormat);
            OSStatus err = AudioFileStreamGetProperty(inAudioFileStream, kAudioFileStreamProperty_DataFormat, &asbdSize, &(THIS->m_srcFormat));
            if (err) {
                AS_TRACE("Unable to set the src format\n");
                break;
            }
            
            THIS->m_packetDuration = THIS->m_srcFormat.mFramesPerPacket / THIS->m_srcFormat.mSampleRate;
            
            AS_TRACE("srcFormat, bytes per packet %i\n", (unsigned int)THIS->m_srcFormat.mBytesPerPacket);
            
            if (THIS->m_audioConverter) {
                AudioConverterDispose(THIS->m_audioConverter);
            }
            
            err = AudioConverterNew(&(THIS->m_srcFormat),
                                    &(THIS->m_dstFormat),
                                    &(THIS->m_audioConverter));
            
            if (err) {
                AS_TRACE("Error in creating an audio converter, error %i\n", err);
                
                THIS->m_initializationError = err;
            }
            
            THIS->setCookiesForStream(inAudioFileStream);
            
            THIS->audioQueue()->handlePropertyChange(inAudioFileStream, inPropertyID, ioFlags);
            break;
        }
        default: {
            THIS->audioQueue()->handlePropertyChange(inAudioFileStream, inPropertyID, ioFlags);
            break;
        }
    }
}
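m_dstFormat, the destination side of the converter created in the ReadyToProducePackets branch, is filled in elsewhere in Audio_Stream. A plausible sketch of such a destination format for a streaming decoder (all values here are assumptions, following the pattern of the other examples on this page):

// sketch: plain interleaved 16-bit signed PCM as a decoder destination format
AudioStreamBasicDescription dstFormat = {0};
dstFormat.mSampleRate       = 44100;                 // assumed output rate
dstFormat.mFormatID         = kAudioFormatLinearPCM;
dstFormat.mFormatFlags      = kLinearPCMFormatFlagIsSignedInteger |
                              kLinearPCMFormatFlagIsPacked;
dstFormat.mChannelsPerFrame = 2;
dstFormat.mBitsPerChannel   = 16;
dstFormat.mFramesPerPacket  = 1;
dstFormat.mBytesPerFrame    = dstFormat.mChannelsPerFrame * dstFormat.mBitsPerChannel / 8;
dstFormat.mBytesPerPacket   = dstFormat.mBytesPerFrame * dstFormat.mFramesPerPacket;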
Example #17
JNIEXPORT jint JNICALL Java_com_apple_audio_toolbox_AudioConverter_AudioConverterNew
  (JNIEnv *, jclass, jint inSourceFormat, jint inDestinationFormat, jint outAudioConverter)
{
	return (jint)AudioConverterNew((const AudioStreamBasicDescription *)inSourceFormat, (const AudioStreamBasicDescription *)inDestinationFormat, (AudioConverterRef *)outAudioConverter);
}
Example #18
OSStatus DoConvertFile(CFURLRef sourceURL, CFURLRef destinationURL, OSType outputFormat, Float64 outputSampleRate, UInt32 outputBitRate)
{
	AudioFileID         sourceFileID = 0;
    AudioFileID         destinationFileID = 0;
    AudioConverterRef   converter = NULL;
    Boolean             canResumeFromInterruption = true; // we can continue unless told otherwise
    
    CAStreamBasicDescription srcFormat, dstFormat;
    AudioFileIO afio = {};
    
    char                         *outputBuffer = NULL;
    AudioStreamPacketDescription *outputPacketDescriptions = NULL;
    
    OSStatus error = noErr;
    
    // in this sample we should never be on the main thread here
    assert(![NSThread isMainThread]);
    
    // transition thread state to kStateRunning before continuing
    
    printf("\nDoConvertFile\n");
    
    try {
        // get the source file
        XThrowIfError(AudioFileOpenURL(sourceURL, kAudioFileReadPermission, 0, &sourceFileID), "AudioFileOpenURL failed");
	
        // get the source data format
        UInt32 size = sizeof(srcFormat);
        XThrowIfError(AudioFileGetProperty(sourceFileID, kAudioFilePropertyDataFormat, &size, &srcFormat), "couldn't get source data format");
        
        // setup the output file format
        dstFormat.mSampleRate = (outputSampleRate == 0 ? srcFormat.mSampleRate : outputSampleRate); // set sample rate
        if (outputFormat == kAudioFormatLinearPCM) {
            // if the output format is PCM, create a 16-bit int PCM file format description as an example
            dstFormat.mFormatID = outputFormat;
            dstFormat.mChannelsPerFrame = srcFormat.NumberChannels();
            dstFormat.mBitsPerChannel = 16;
            dstFormat.mBytesPerPacket = dstFormat.mBytesPerFrame = 2 * dstFormat.mChannelsPerFrame;
            dstFormat.mFramesPerPacket = 1;
            dstFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger; // little-endian
        } else {
            // compressed format - need to set at least format, sample rate and channel fields for kAudioFormatProperty_FormatInfo
            dstFormat.mFormatID = outputFormat;
            dstFormat.mChannelsPerFrame =  (outputFormat == kAudioFormatiLBC ? 1 : srcFormat.NumberChannels()); // for iLBC num channels must be 1
            
            // use AudioFormat API to fill out the rest of the description
            size = sizeof(dstFormat);
            XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &size, &dstFormat), "couldn't create destination data format");
        }
        
        printf("Source File format: "); srcFormat.Print();
        printf("Destination format: "); dstFormat.Print();
	
        // create the AudioConverter
        
        XThrowIfError(AudioConverterNew(&srcFormat, &dstFormat, &converter), "AudioConverterNew failed!");
    
        // if the source has a cookie, get it and set it on the Audio Converter
        ReadCookie(sourceFileID, converter);

        // get the actual formats back from the Audio Converter
        size = sizeof(srcFormat);
        XThrowIfError(AudioConverterGetProperty(converter, kAudioConverterCurrentInputStreamDescription, &size, &srcFormat), "AudioConverterGetProperty kAudioConverterCurrentInputStreamDescription failed!");

        size = sizeof(dstFormat);
        XThrowIfError(AudioConverterGetProperty(converter, kAudioConverterCurrentOutputStreamDescription, &size, &dstFormat), "AudioConverterGetProperty kAudioConverterCurrentOutputStreamDescription failed!");

        printf("Formats returned from AudioConverter:\n");
        printf("              Source format: "); srcFormat.Print();
        printf("    Destination File format: "); dstFormat.Print();
        
        // if encoding to AAC set the bitrate
        // kAudioConverterEncodeBitRate is a UInt32 value containing the number of bits per second to aim for when encoding data
        // when you explicitly set the bit rate and the sample rate, this tells the encoder to stick with both bit rate and sample rate
        //     but there are combinations (also depending on the number of channels) which will not be allowed
        // if you do not explicitly set a bit rate the encoder will pick the correct value for you depending on samplerate and number of channels
        // bit rate also scales with the number of channels, therefore one bit rate per sample rate can be used for mono cases
        //    and if you have stereo or more, you can multiply that number by the number of channels.
        
        if (outputBitRate == 0) {
            outputBitRate = 192000; // 192kbs
        }
        
        if (dstFormat.mFormatID == kAudioFormatMPEG4AAC) {
            
            UInt32 propSize = sizeof(outputBitRate);
            
            // set the bit rate depending on the samplerate chosen
            XThrowIfError(AudioConverterSetProperty(converter, kAudioConverterEncodeBitRate, propSize, &outputBitRate),
                           "AudioConverterSetProperty kAudioConverterEncodeBitRate failed!");
            
            // get it back and print it out
            AudioConverterGetProperty(converter, kAudioConverterEncodeBitRate, &propSize, &outputBitRate);
            printf ("AAC Encode Bitrate: %u\n", (unsigned int)outputBitRate);
        }

        // can the Audio Converter resume conversion after an interruption?
        // this property may be queried at any time after construction of the Audio Converter after setting its output format
        // there's no clear reason to prefer construction time, interruption time, or potential resumption time but we prefer
        // construction time since it means less code to execute during or after interruption time
        UInt32 canResume = 0;
        size = sizeof(canResume);
        error = AudioConverterGetProperty(converter, kAudioConverterPropertyCanResumeFromInterruption, &size, &canResume);
        if (noErr == error) {
            // we received a valid return value from the GetProperty call
            // if the property's value is 1, then the codec CAN resume work following an interruption
            // if the property's value is 0, then interruptions destroy the codec's state and we're done
            
            if (0 == canResume) canResumeFromInterruption = false;
            
            printf("Audio Converter %s continue after interruption!\n", (canResumeFromInterruption == 0 ? "CANNOT" : "CAN"));
        } else {
            // if the property is unimplemented (kAudioConverterErr_PropertyNotSupported, or paramErr returned in the case of PCM),
            // then the codec being used is not a hardware codec so we're not concerned about codec state
            // we are always going to be able to resume conversion after an interruption
            
            if (kAudioConverterErr_PropertyNotSupported == error) {
                printf("kAudioConverterPropertyCanResumeFromInterruption property not supported - see comments in source for more info.\n");
            } else {
                printf("AudioConverterGetProperty kAudioConverterPropertyCanResumeFromInterruption result %ld, paramErr is OK if PCM\n", error);
            }
            
            error = noErr;
        }
        
        // create the destination file 
        XThrowIfError(AudioFileCreateWithURL(destinationURL, kAudioFileCAFType, &dstFormat, kAudioFileFlags_EraseFile, &destinationFileID), "AudioFileCreateWithURL failed!");

        // set up source buffers and data proc info struct
        afio.srcFileID = sourceFileID;
        afio.srcBufferSize = 32768;
        afio.srcBuffer = new char [afio.srcBufferSize];
        afio.srcFilePos = 0;
        afio.srcFormat = srcFormat;
		
        if (srcFormat.mBytesPerPacket == 0) {
            // if the source format is VBR, we need to get the maximum packet size
            // use kAudioFilePropertyPacketSizeUpperBound which returns the theoretical maximum packet size
            // in the file (without actually scanning the whole file to find the largest packet,
            // as may happen with kAudioFilePropertyMaximumPacketSize)
            size = sizeof(afio.srcSizePerPacket);
            XThrowIfError(AudioFileGetProperty(sourceFileID, kAudioFilePropertyPacketSizeUpperBound, &size, &afio.srcSizePerPacket), "AudioFileGetProperty kAudioFilePropertyPacketSizeUpperBound failed!");
            
            // how many packets can we read for our buffer size?
            afio.numPacketsPerRead = afio.srcBufferSize / afio.srcSizePerPacket;
            
            // allocate memory for the PacketDescription structures describing the layout of each packet
            afio.packetDescriptions = new AudioStreamPacketDescription [afio.numPacketsPerRead];
        } else {
            // CBR source format
            afio.srcSizePerPacket = srcFormat.mBytesPerPacket;
            afio.numPacketsPerRead = afio.srcBufferSize / afio.srcSizePerPacket;
            afio.packetDescriptions = NULL;
        }

        // set up output buffers
        UInt32 outputSizePerPacket = dstFormat.mBytesPerPacket; // this will be non-zero if the format is CBR
        UInt32 theOutputBufSize = 32768;
        outputBuffer = new char[theOutputBufSize];
        
        if (outputSizePerPacket == 0) {
            // if the destination format is VBR, we need to get max size per packet from the converter
            size = sizeof(outputSizePerPacket);
            XThrowIfError(AudioConverterGetProperty(converter, kAudioConverterPropertyMaximumOutputPacketSize, &size, &outputSizePerPacket), "AudioConverterGetProperty kAudioConverterPropertyMaximumOutputPacketSize failed!");
            
            // allocate memory for the PacketDescription structures describing the layout of each packet
            outputPacketDescriptions = new AudioStreamPacketDescription [theOutputBufSize / outputSizePerPacket];
        }
        UInt32 numOutputPackets = theOutputBufSize / outputSizePerPacket;

        // if the destination format has a cookie, get it and set it on the output file
        WriteCookie(converter, destinationFileID);

        // write destination channel layout
        if (srcFormat.mChannelsPerFrame > 2) {
            WriteDestinationChannelLayout(converter, sourceFileID, destinationFileID);
        }

        UInt64 totalOutputFrames = 0; // used for debugging printf
        SInt64 outputFilePos = 0;
        
        // loop to convert data
        printf("Converting...\n");
        while (1) {

            // set up output buffer list
            AudioBufferList fillBufList;
            fillBufList.mNumberBuffers = 1;
            fillBufList.mBuffers[0].mNumberChannels = dstFormat.mChannelsPerFrame;
            fillBufList.mBuffers[0].mDataByteSize = theOutputBufSize;
            fillBufList.mBuffers[0].mData = outputBuffer;
            
            // this will block if we're interrupted
            Boolean wasInterrupted = NO;
            
            if ((error || wasInterrupted) && (false == canResumeFromInterruption)) {
                // this is our interruption termination condition
                // an interruption has occured but the Audio Converter cannot continue
                error = kMyAudioConverterErr_CannotResumeFromInterruptionError;
                break;
            }

            // convert data
            UInt32 ioOutputDataPackets = numOutputPackets;
            printf("AudioConverterFillComplexBuffer...\n");
            error = AudioConverterFillComplexBuffer(converter, EncoderDataProc, &afio, &ioOutputDataPackets, &fillBufList, outputPacketDescriptions);
            // if interrupted in the process of the conversion call, we must handle the error appropriately
            if (error) {
                if (kAudioConverterErr_HardwareInUse == error) {
                     printf("Audio Converter returned kAudioConverterErr_HardwareInUse!\n");
                } else {
                    XThrowIfError(error, "AudioConverterFillComplexBuffer error!");
                }
            } else {
                if (ioOutputDataPackets == 0) {
                    // this is the EOF condition
                    error = noErr;
                    break;
                }
            }
            
            if (noErr == error) {
                // write to output file
                UInt32 inNumBytes = fillBufList.mBuffers[0].mDataByteSize;
                XThrowIfError(AudioFileWritePackets(destinationFileID, false, inNumBytes, outputPacketDescriptions, outputFilePos, &ioOutputDataPackets, outputBuffer), "AudioFileWritePackets failed!");
            
                printf("Convert Output: Write %lu packets at position %lld, size: %ld\n", ioOutputDataPackets, outputFilePos, inNumBytes);
                
                // advance output file packet position
                outputFilePos += ioOutputDataPackets;

                if (dstFormat.mFramesPerPacket) { 
                    // the format has constant frames per packet
                    totalOutputFrames += (ioOutputDataPackets * dstFormat.mFramesPerPacket);
                } else if (outputPacketDescriptions != NULL) {
                    // variable frames per packet require doing this for each packet (adding up the number of sample frames of data in each packet)
                    for (UInt32 i = 0; i < ioOutputDataPackets; ++i)
                        totalOutputFrames += outputPacketDescriptions[i].mVariableFramesInPacket;
                }
            }
        } // while

        if (noErr == error) {
            // write out any of the leading and trailing frames for compressed formats only
            if (dstFormat.mBitsPerChannel == 0) {
                // our output frame count should jive with
                printf("Total number of output frames counted: %lld\n", totalOutputFrames); 
                WritePacketTableInfo(converter, destinationFileID);
            }
        
            // write the cookie again - sometimes codecs will update cookies at the end of a conversion
            WriteCookie(converter, destinationFileID);
        }
    }
    catch (CAXException e) {
		char buf[256];
		fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
        error = e.mError;
	}
    
    // cleanup
    if (converter) AudioConverterDispose(converter);
    if (destinationFileID) AudioFileClose(destinationFileID);
	if (sourceFileID) AudioFileClose(sourceFileID);
    
    if (afio.srcBuffer) delete [] afio.srcBuffer;
    if (afio.packetDescriptions) delete [] afio.packetDescriptions;
    if (outputBuffer) delete [] outputBuffer;
    if (outputPacketDescriptions) delete [] outputPacketDescriptions;
    
    
    return error;
}
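EncoderDataProc, the input callback handed to AudioConverterFillComplexBuffer above, is defined elsewhere in the sample. A reconstruction based on the AudioFileIO fields set up in this function (a sketch, not the sample's exact code):

// sketch: feed source packets to the converter on demand
static OSStatus EncoderDataProc(AudioConverterRef              inAudioConverter,
                                UInt32                        *ioNumberDataPackets,
                                AudioBufferList               *ioData,
                                AudioStreamPacketDescription **outDataPacketDescription,
                                void                          *inUserData)
{
    AudioFileIO *afio = (AudioFileIO *)inUserData;

    // don't read more packets than fit in the source buffer
    if (*ioNumberDataPackets > afio->numPacketsPerRead)
        *ioNumberDataPackets = afio->numPacketsPerRead;

    // read packets from the source file
    UInt32 outNumBytes = afio->srcBufferSize;
    OSStatus error = AudioFileReadPacketData(afio->srcFileID, false, &outNumBytes,
                                             afio->packetDescriptions, afio->srcFilePos,
                                             ioNumberDataPackets, afio->srcBuffer);
    if (error) return error;

    // advance the file position and hand the data to the converter
    afio->srcFilePos += *ioNumberDataPackets;
    ioData->mBuffers[0].mData = afio->srcBuffer;
    ioData->mBuffers[0].mDataByteSize = outNumBytes;
    ioData->mBuffers[0].mNumberChannels = afio->srcFormat.mChannelsPerFrame;

    // pass packet descriptions along for VBR sources
    if (outDataPacketDescription)
        *outDataPacketDescription = afio->packetDescriptions;

    return noErr;
}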
Example #19
    std::shared_ptr<Buffer>
    AudioMixer::resample(const uint8_t* const buffer,
                         size_t size,
                         AudioBufferMetadata& metadata)
    {
        const auto inFrequncyInHz = metadata.getData<kAudioMetadataFrequencyInHz>();
        const auto inBitsPerChannel = metadata.getData<kAudioMetadataBitsPerChannel>();
        const auto inChannelCount = metadata.getData<kAudioMetadataChannelCount>();
        const auto inFlags = metadata.getData<kAudioMetadataFlags>();
        const auto inBytesPerFrame = metadata.getData<kAudioMetadataBytesPerFrame>();
        const auto inNumberFrames = metadata.getData<kAudioMetadataNumberFrames>();
        const auto inUsesOSStruct = metadata.getData<kAudioMetadataUsesOSStruct>();
        
        if(m_outFrequencyInHz == inFrequncyInHz &&
           m_outBitsPerChannel == inBitsPerChannel &&
           m_outChannelCount == inChannelCount
           && !(inFlags & kAudioFormatFlagIsNonInterleaved)
           && !(inFlags & kAudioFormatFlagIsFloat))
        {
            // No resampling necessary
            return std::make_shared<Buffer>();
        }
        
        uint64_t hash = uint64_t(inBytesPerFrame&0xFF) << 56 | uint64_t(inFlags&0xFF) << 48 | uint64_t(inChannelCount&0xFF) << 40
                        | uint64_t(inBitsPerChannel&0xFF) << 32 | inFrequncyInHz;
        
        auto it = m_converters.find(hash) ;
        ConverterInst converter = {0};
        
        if(it == m_converters.end()) {
            AudioStreamBasicDescription in = {0};
            AudioStreamBasicDescription out = {0};
            
            in.mFormatID = kAudioFormatLinearPCM;
            in.mFormatFlags =  inFlags;
            in.mChannelsPerFrame = inChannelCount;
            in.mSampleRate = inFrequncyInHz;
            in.mBitsPerChannel = inBitsPerChannel;
            in.mBytesPerFrame = inBytesPerFrame;
            in.mFramesPerPacket = 1;
            in.mBytesPerPacket = in.mBytesPerFrame * in.mFramesPerPacket;
            
            out.mFormatID = kAudioFormatLinearPCM;
            out.mFormatFlags =  kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
            out.mChannelsPerFrame = m_outChannelCount;
            out.mSampleRate = m_outFrequencyInHz;
            out.mBitsPerChannel = m_outBitsPerChannel;
            out.mBytesPerFrame = (out.mBitsPerChannel * out.mChannelsPerFrame) / 8;
            out.mFramesPerPacket = 1;
            out.mBytesPerPacket = out.mBytesPerFrame * out.mFramesPerPacket;
            
            converter.asbdIn = in;
            converter.asbdOut = out;
            
            OSStatus ret = AudioConverterNew(&in, &out, &converter.converter);
            
            AudioConverterSetProperty(converter.converter,
                                      kAudioConverterSampleRateConverterComplexity,
                                      sizeof(s_samplingRateConverterComplexity),
                                      &s_samplingRateConverterComplexity);
            
            AudioConverterSetProperty(converter.converter,
                                      kAudioConverterSampleRateConverterQuality,
                                      sizeof(s_samplingRateConverterQuality),
                                      &s_samplingRateConverterQuality);
        
            auto prime = kConverterPrimeMethod_None;
            
            AudioConverterSetProperty(converter.converter,
                                      kAudioConverterPrimeMethod,
                                      sizeof(prime),
                                      &prime);
            
            m_converters[hash] = converter;
            
            if(ret != noErr) {
                DLog("ret = %d (%x)", (int)ret, (unsigned)ret);
            }
            
        } else {
            converter = it->second;
        }
        
        auto & in = converter.asbdIn;
        auto & out = converter.asbdOut;

        const double inSampleCount = inNumberFrames;
        const double ratio = static_cast<double>(inFrequncyInHz) / static_cast<double>(m_outFrequencyInHz);
        
        const double outBufferSampleCount = std::round(double(inSampleCount) / ratio);
        
        const size_t outBufferSize = out.mBytesPerPacket * outBufferSampleCount;
        const auto outBuffer = std::make_shared<Buffer>(outBufferSize);
        
        
        std::unique_ptr<UserData> ud(new UserData());
        ud->size = static_cast<int>(size);
        ud->data = const_cast<uint8_t*>(buffer);
        ud->p = inUsesOSStruct ? 0 : ud->data;
        ud->packetSize = in.mBytesPerPacket;
        ud->numberPackets = inSampleCount;
        ud->numChannels = inChannelCount;
        ud->isInterleaved = !(inFlags & kAudioFormatFlagIsNonInterleaved);
        ud->usesOSStruct = inUsesOSStruct;
        
        AudioBufferList outBufferList;
        outBufferList.mNumberBuffers = 1;
        outBufferList.mBuffers[0].mDataByteSize = static_cast<UInt32>(outBufferSize);
        outBufferList.mBuffers[0].mNumberChannels = m_outChannelCount;
        outBufferList.mBuffers[0].mData = (*outBuffer)();
        
        UInt32 sampleCount = outBufferSampleCount;
        OSStatus ret = AudioConverterFillComplexBuffer(converter.converter, /* AudioConverterRef inAudioConverter */
                                        AudioMixer::ioProc, /* AudioConverterComplexInputDataProc inInputDataProc */
                                        ud.get(), /* void *inInputDataProcUserData */
                                        &sampleCount, /* UInt32 *ioOutputDataPacketSize */
                                        &outBufferList, /* AudioBufferList *outOutputData */
                                        NULL /* AudioStreamPacketDescription *outPacketDescription */
                                        );
        if(ret != noErr) {
            DLog("ret = %d (%x)", (int)ret, (unsigned)ret);
        }
      
        outBuffer->setSize(outBufferList.mBuffers[0].mDataByteSize);
        return outBuffer;
    }
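AudioMixer::ioProc, the input callback used above, is defined elsewhere in the class. A simplified sketch covering only the interleaved, non-OSStruct path, based on the UserData fields this function fills in (the real implementation also handles the usesOSStruct case):

// sketch: hand the caller's interleaved samples straight to the converter
OSStatus
AudioMixer::ioProc(AudioConverterRef              audioConverter,
                   UInt32                        *ioNumberDataPackets,
                   AudioBufferList               *ioData,
                   AudioStreamPacketDescription **outDataPacketDescription,
                   void                          *inUserData)
{
    UserData *ud = static_cast<UserData*>(inUserData);

    // clamp the request to the packets remaining in the caller's buffer
    UInt32 available = static_cast<UInt32>(ud->size) / ud->packetSize;
    if (*ioNumberDataPackets > available)
        *ioNumberDataPackets = available;

    ioData->mNumberBuffers = 1;
    ioData->mBuffers[0].mNumberChannels = ud->numChannels;
    ioData->mBuffers[0].mData           = ud->data;
    ioData->mBuffers[0].mDataByteSize   = *ioNumberDataPackets * ud->packetSize;

    return noErr;
}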
Example #20
static GVBool gviHardwareInitPlayback(GVIDevice * device)
{
	GVIHardwareData * data = (GVIHardwareData *)device->m_data;
	UInt32 size;
	OSStatus result;
	UInt32 primeMethod;
	SInt32 channelMap[100];
	int i;

	// create the array of sources
	data->m_playbackSources = gviNewSourceList();
	if(!data->m_playbackSources)
		return GVFalse;

	// get the playback format
	size = sizeof(AudioStreamBasicDescription);
	result = AudioDeviceGetProperty(device->m_deviceID, 0, false, kAudioDevicePropertyStreamFormat, &size, &data->m_playbackStreamDescriptor);
	if(result != noErr)
	{
		gviFreeSourceList(data->m_playbackSources);
		return GVFalse;
	}

	// create a converter from the GV format to the playback format
	result = AudioConverterNew(&GVIVoiceFormat, &data->m_playbackStreamDescriptor, &data->m_playbackConverter);
	if(result != noErr)
	{
		gviFreeSourceList(data->m_playbackSources);
		return GVFalse;
	}

	// set it to do no priming
	primeMethod = kConverterPrimeMethod_None;
	result = AudioConverterSetProperty(data->m_playbackConverter, kAudioConverterPrimeMethod, sizeof(UInt32), &primeMethod);
	if(result != noErr)
	{
		AudioConverterDispose(data->m_playbackConverter);
		gviFreeSourceList(data->m_playbackSources);
		return GVFalse;
	}

	// setup the converter to map the input channel to all output channels
	result = AudioConverterGetPropertyInfo(data->m_playbackConverter, kAudioConverterChannelMap, &size, NULL);
	if(result == noErr)
	{
		result = AudioConverterGetProperty(data->m_playbackConverter, kAudioConverterChannelMap, &size, channelMap);
		if(result == noErr)
		{
			for(i = 0 ; i < (size / sizeof(SInt32)) ; i++)
				channelMap[i] = 0;

			AudioConverterSetProperty(data->m_playbackConverter, kAudioConverterChannelMap, size, channelMap);
		}
	}

	// allocate the playback buffer
	data->m_playbackBuffer = (GVSample *)gsimalloc(GVIBytesPerFrame);
	if(!data->m_playbackBuffer)
	{
		AudioConverterDispose(data->m_playbackConverter);
		gviFreeSourceList(data->m_playbackSources);
		return GVFalse;
	}

	// add property listener
	AudioDeviceAddPropertyListener(device->m_deviceID, 0, false, kAudioDevicePropertyDeviceIsAlive, gviPropertyListener, device);

#if GVI_VOLUME_IN_SOFTWARE
	// init volume
	data->m_playbackVolume = (GVScalar)1.0;
#endif

	return GVTrue;
}
Example #21
static void set_recorded_format(phastream_t *as, float rate, unsigned channels, unsigned format) {
	ca_dev *cadev = (ca_dev *) as->drvinfo;
	OSStatus err = noErr;
/* <UOLFONE <pTime adjustment> > */
	//unsigned decodedFrameSize = as->ms.codec->decoded_framesize;
	unsigned decodedFrameSize = ph_astream_decoded_framesize_get(as);
/* </UOLFONE> */
	AudioStreamBasicDescription imgFmt, devFmt;
	UInt32 propsize = sizeof(devFmt);
	UInt32 formatFlags = kLinearPCMFormatFlagIsSignedInteger |
		kLinearPCMFormatFlagIsPacked |
		kLinearPCMFormatFlagIsNonInterleaved;

#if defined(__BIG_ENDIAN__)
	formatFlags |= kLinearPCMFormatFlagIsBigEndian;
#endif

	err = AudioDeviceGetProperty(get_audiodeviceid(cadev->inputID), 0, 1, kAudioDevicePropertyStreamFormat, &propsize, &devFmt);
	if (err != noErr) {
		DBG_DYNA_AUDIO_DRV("!!CoreAudio: can't get device info\n");
		return;
	}

	DBG_DYNA_AUDIO_DRV("**CoreAudio: Input format:\n\t");
	DBG_DYNA_AUDIO_DRV("...SampleRate=%f,\n", devFmt.mSampleRate);
	DBG_DYNA_AUDIO_DRV("...BytesPerPacket=%ld,\n", devFmt.mBytesPerPacket);
	DBG_DYNA_AUDIO_DRV("...FramesPerPacket=%ld,\n", devFmt.mFramesPerPacket);
	DBG_DYNA_AUDIO_DRV("...BytesPerFrame=%ld,\n", devFmt.mBytesPerFrame);
	DBG_DYNA_AUDIO_DRV("...BitsPerChannel=%ld,\n", devFmt.mBitsPerChannel);
	DBG_DYNA_AUDIO_DRV("...ChannelsPerFrame=%ld\n", devFmt.mChannelsPerFrame);

	imgFmt.mSampleRate = rate;
	imgFmt.mFormatID = kAudioFormatLinearPCM;
	imgFmt.mFormatFlags = formatFlags;
	imgFmt.mBytesPerPacket = 2;
	imgFmt.mFramesPerPacket = 1;
	imgFmt.mBytesPerFrame = 2;
	imgFmt.mChannelsPerFrame = channels;
	imgFmt.mBitsPerChannel = format;

	err = AudioConverterNew(&devFmt, &imgFmt, &cadev->inputConverter);
	if (err != noErr) {
		DBG_DYNA_AUDIO_DRV("!!CoreAudio: can't create audio converter for input\n");
		return;
	}

	if (as->actual_rate != as->clock_rate) {
		decodedFrameSize *= 2;
	}

	propsize = sizeof(unsigned);
	cadev->inputConverterBufferSize = decodedFrameSize;
	if ((cadev->convertedInputBuffer = malloc(sizeof(char) * decodedFrameSize)) == NULL) {
		DBG_DYNA_AUDIO_DRV("!!CoreAudio: can't allocate enough memory for cadev->convertedInputBuffer\n");
		return;
	}

	err = AudioConverterGetProperty(cadev->inputConverter, kAudioConverterPropertyCalculateInputBufferSize,
		&propsize, &cadev->inputConverterBufferSize);
	if (err != noErr) {
		DBG_DYNA_AUDIO_DRV("!!CoreAudio: can't get input converter buffer size\n");
		return;
	}
}
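kAudioConverterPropertyCalculateInputBufferSize uses an in/out convention: the UInt32 passed in holds the desired output byte count (here, decodedFrameSize via inputConverterBufferSize) and comes back holding the input byte count needed to produce that much output. A standalone sketch of the same call (converter and desiredOutputBytes are placeholder names):

	// sketch: ask the converter how much input yields a given amount of output
	UInt32 bytesNeeded = desiredOutputBytes;   // in: output byte count we want
	UInt32 propSize = sizeof(bytesNeeded);
	err = AudioConverterGetProperty(converter,
		kAudioConverterPropertyCalculateInputBufferSize,
		&propSize, &bytesNeeded);              // out: required input byte count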
Example #22
/***********************************************************************
 * hb_work_encCoreAudio_init
 ***********************************************************************
 *
 **********************************************************************/
int encCoreAudioInit(hb_work_object_t *w, hb_job_t *job, enum AAC_MODE mode)
{
    hb_work_private_t *pv = calloc(1, sizeof(hb_work_private_t));
    hb_audio_t *audio = w->audio;
    AudioStreamBasicDescription input, output;
    UInt32 tmp, tmpsiz = sizeof(tmp);
    OSStatus err;

    w->private_data = pv;
    pv->job = job;

    // pass the number of channels used into the private work data
    pv->nchannels =
        hb_mixdown_get_discrete_channel_count(audio->config.out.mixdown);

    bzero(&input, sizeof(AudioStreamBasicDescription));
    input.mSampleRate = (Float64)audio->config.out.samplerate;
    input.mFormatID = kAudioFormatLinearPCM;
    input.mFormatFlags = (kLinearPCMFormatFlagIsFloat|kAudioFormatFlagsNativeEndian);
    input.mBytesPerPacket = 4 * pv->nchannels;
    input.mFramesPerPacket = 1;
    input.mBytesPerFrame = input.mBytesPerPacket * input.mFramesPerPacket;
    input.mChannelsPerFrame = pv->nchannels;
    input.mBitsPerChannel = 32;

    bzero(&output, sizeof(AudioStreamBasicDescription));
    switch (mode)
    {
        case AAC_MODE_HE:
            output.mFormatID = kAudioFormatMPEG4AAC_HE;
            break;
        case AAC_MODE_LC:
        default:
            output.mFormatID = kAudioFormatMPEG4AAC;
            break;
    }
    output.mSampleRate = (Float64)audio->config.out.samplerate;
    output.mChannelsPerFrame = pv->nchannels;
    // let CoreAudio decide the rest

    // initialise encoder
    err = AudioConverterNew(&input, &output, &pv->converter);
    if (err != noErr)
    {
        // Retry without the samplerate
        bzero(&output, sizeof(AudioStreamBasicDescription));
        switch (mode)
        {
            case AAC_MODE_HE:
                output.mFormatID = kAudioFormatMPEG4AAC_HE;
                break;
            case AAC_MODE_LC:
            default:
                output.mFormatID = kAudioFormatMPEG4AAC;
                break;
        }
        output.mChannelsPerFrame = pv->nchannels;

        err = AudioConverterNew(&input, &output, &pv->converter);

        if (err != noErr)
        {
            hb_log("Error creating an AudioConverter err=%"PRId64" output.mBytesPerFrame=%"PRIu64"",
                   (int64_t)err, (uint64_t)output.mBytesPerFrame);
            *job->done_error = HB_ERROR_UNKNOWN;
            *job->die = 1;
            return -1;
        }
    }

    // set encoder quality to maximum
    tmp = kAudioConverterQuality_Max;
    AudioConverterSetProperty(pv->converter, kAudioConverterCodecQuality,
                              sizeof(tmp), &tmp);

    if (audio->config.out.bitrate > 0)
    {
        // set encoder bitrate control mode to constrained variable
        tmp = kAudioCodecBitRateControlMode_VariableConstrained;
        AudioConverterSetProperty(pv->converter,
                                  kAudioCodecPropertyBitRateControlMode,
                                  sizeof(tmp), &tmp);

        // get available bitrates
        AudioValueRange *bitrates;
        ssize_t bitrateCounts;
        err = AudioConverterGetPropertyInfo(pv->converter,
                                            kAudioConverterApplicableEncodeBitRates,
                                            &tmpsiz, NULL);
        bitrates = malloc(tmpsiz);
        err = AudioConverterGetProperty(pv->converter,
                                        kAudioConverterApplicableEncodeBitRates,
                                        &tmpsiz, bitrates);
        bitrateCounts = tmpsiz / sizeof(AudioValueRange);

        // set bitrate
        tmp = audio->config.out.bitrate * 1000;
        if (tmp < bitrates[0].mMinimum)
            tmp = bitrates[0].mMinimum;
        if (tmp > bitrates[bitrateCounts-1].mMinimum)
            tmp = bitrates[bitrateCounts-1].mMinimum;
        free(bitrates);
        if (tmp != audio->config.out.bitrate * 1000)
        {
            hb_log("encCoreAudioInit: sanitizing track %d audio bitrate %d to %"PRIu32"",
                   audio->config.out.track, audio->config.out.bitrate, tmp / 1000);
        }
        AudioConverterSetProperty(pv->converter,
                                  kAudioConverterEncodeBitRate,
                                  sizeof(tmp), &tmp);
    }
    else if (audio->config.out.quality >= 0)
    {
        if (mode != AAC_MODE_LC)
        {
            hb_error("encCoreAudioInit: internal error, VBR set but not applicable");
            return 1;
        }
        // set encoder bitrate control mode to variable
        tmp = kAudioCodecBitRateControlMode_Variable;
        AudioConverterSetProperty(pv->converter,
                                  kAudioCodecPropertyBitRateControlMode,
                                  sizeof(tmp), &tmp);

        // set quality
        tmp = audio->config.out.quality;
        AudioConverterSetProperty(pv->converter,
                                  kAudioCodecPropertySoundQualityForVBR,
                                  sizeof(tmp), &tmp);
    }
    else
    {
        hb_error("encCoreAudioInit: internal error, bitrate/quality not set");
        return 1;
    }

    // get real input
    tmpsiz = sizeof(input);
    AudioConverterGetProperty(pv->converter,
                              kAudioConverterCurrentInputStreamDescription,
                              &tmpsiz, &input);
    // get real output
    tmpsiz = sizeof(output);
    AudioConverterGetProperty(pv->converter,
                              kAudioConverterCurrentOutputStreamDescription,
                              &tmpsiz, &output);

    // set sizes
    pv->isamplesiz  = input.mBytesPerPacket;
    pv->isamples    = output.mFramesPerPacket;
    pv->osamplerate = output.mSampleRate;
    audio->config.out.samples_per_frame = pv->isamples;

    // channel remapping
    pv->remap = hb_audio_remap_init(AV_SAMPLE_FMT_FLT, &hb_aac_chan_map,
                                    audio->config.in.channel_map);
    if (pv->remap == NULL)
    {
        hb_error("encCoreAudioInit: hb_audio_remap_init() failed");
    }
    uint64_t layout = hb_ff_mixdown_xlat(audio->config.out.mixdown, NULL);
    hb_audio_remap_set_channel_layout(pv->remap, layout);

    // get maximum output size
    AudioConverterGetProperty(pv->converter,
                              kAudioConverterPropertyMaximumOutputPacketSize,
                              &tmpsiz, &tmp);
    pv->omaxpacket = tmp;

    // get magic cookie (elementary stream descriptor)
    tmp = HB_CONFIG_MAX_SIZE;
    AudioConverterGetProperty(pv->converter,
                              kAudioConverterCompressionMagicCookie,
                              &tmp, w->config->extradata.bytes);
    // CoreAudio returns a complete ESDS, but we only need
    // the DecoderSpecific info.
    UInt8* buffer = NULL;
    ReadESDSDescExt(w->config->extradata.bytes, &buffer, &tmpsiz, 0);
    w->config->extradata.length = tmpsiz;
    memmove(w->config->extradata.bytes, buffer, w->config->extradata.length);
    free(buffer);

    pv->list = hb_list_init();
    pv->buf = NULL;

    return 0;
}
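
Not shown above is how the converter is subsequently driven. In AudioToolbox the usual pattern is AudioConverterFillComplexBuffer() with an input callback that hands the converter `isamples` frames of interleaved float PCM at a time. The sketch below illustrates that pattern only; the my_* names and the state struct are hypothetical stand-ins, not HandBrake's actual Encode() code.

#include <AudioToolbox/AudioToolbox.h>

typedef struct {
    AudioConverterRef converter;
    UInt32 nchannels, isamples, isamplesiz, omaxpacket;
    void  *inputPCM;           // at least isamples interleaved float frames
    int    haveInput;          // set when inputPCM has been (re)filled
} my_enc_state;                // hypothetical; stands in for hb_work_private_t

// Input callback: supplies LPCM packets (1 frame == 1 packet) on demand.
static OSStatus my_input_proc(AudioConverterRef converter,
                              UInt32 *ioNumberDataPackets,
                              AudioBufferList *ioData,
                              AudioStreamPacketDescription **outDesc,
                              void *inUserData)
{
    my_enc_state *st = (my_enc_state *)inUserData;
    if (!st->haveInput)
    {
        *ioNumberDataPackets = 0;
        return 1;              // non-zero status: no more input for now
    }
    ioData->mNumberBuffers              = 1;
    ioData->mBuffers[0].mNumberChannels = st->nchannels;
    ioData->mBuffers[0].mDataByteSize   = st->isamples * st->isamplesiz;
    ioData->mBuffers[0].mData           = st->inputPCM;
    *ioNumberDataPackets = st->isamples;
    st->haveInput = 0;         // consumed; refill before the next call
    return noErr;
}

// Pull one encoded packet (up to omaxpacket bytes) out of the converter.
static OSStatus my_encode_one(my_enc_state *st, void *out, UInt32 *outBytes)
{
    AudioBufferList obuf;
    AudioStreamPacketDescription odesc = { 0 };
    UInt32 npackets = 1;
    obuf.mNumberBuffers              = 1;
    obuf.mBuffers[0].mNumberChannels = st->nchannels;
    obuf.mBuffers[0].mDataByteSize   = st->omaxpacket;
    obuf.mBuffers[0].mData           = out;
    OSStatus err = AudioConverterFillComplexBuffer(st->converter, my_input_proc,
                                                   st, &npackets, &obuf, &odesc);
    *outBytes = obuf.mBuffers[0].mDataByteSize;
    return err;
}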
Пример #23
0
/***********************************************************************
 * hb_work_encCoreAudio_init
 ***********************************************************************
 *
 **********************************************************************/
int encCoreAudioInit( hb_work_object_t * w, hb_job_t * job, enum AAC_MODE mode )
{
    hb_work_private_t * pv = calloc( 1, sizeof( hb_work_private_t ) );
    hb_audio_t * audio = w->audio;
    AudioStreamBasicDescription input, output;
    UInt32 tmp, tmpsiz = sizeof( tmp );
    OSStatus err;

    w->private_data = pv;
    pv->job = job;

    // pass the number of channels used into the private work data
    pv->nchannels = HB_AMIXDOWN_GET_DISCRETE_CHANNEL_COUNT( audio->config.out.mixdown );

    bzero( &input, sizeof( AudioStreamBasicDescription ) );
    input.mSampleRate = ( Float64 ) audio->config.out.samplerate;
    input.mFormatID = kAudioFormatLinearPCM;
    input.mFormatFlags = kLinearPCMFormatFlagIsFloat | kAudioFormatFlagsNativeEndian;
    input.mBytesPerPacket = 4 * pv->nchannels;
    input.mFramesPerPacket = 1;
    input.mBytesPerFrame = input.mBytesPerPacket * input.mFramesPerPacket;
    input.mChannelsPerFrame = pv->nchannels;
    input.mBitsPerChannel = 32;

    bzero( &output, sizeof( AudioStreamBasicDescription ) );
    switch ( mode ) 
    {
        case AAC_MODE_HE:
            output.mFormatID = kAudioFormatMPEG4AAC_HE;
            break;
        case AAC_MODE_LC:
        default:
            output.mFormatID = kAudioFormatMPEG4AAC;
            break;
    }
    output.mSampleRate = ( Float64 ) audio->config.out.samplerate;
    output.mChannelsPerFrame = pv->nchannels;
    // let CoreAudio decide the rest...

    // initialise encoder
    err = AudioConverterNew( &input, &output, &pv->converter );
    if( err != noErr)
    {
        // Retry without the samplerate
        bzero( &output, sizeof( AudioStreamBasicDescription ) );
        switch ( mode )
        {
            case AAC_MODE_HE:
                output.mFormatID = kAudioFormatMPEG4AAC_HE;
                break;
            case AAC_MODE_LC:
            default:
                output.mFormatID = kAudioFormatMPEG4AAC;
                break;
        }
        output.mChannelsPerFrame = pv->nchannels;

        err = AudioConverterNew( &input, &output, &pv->converter );

        if( err != noErr)
        {
            hb_log( "Error creating an AudioConverter err=%"PRId64" %"PRIu64, (int64_t)err, (uint64_t)output.mBytesPerFrame );
            *job->die = 1;
            return 0;
        }
    }

    if( ( audio->config.out.mixdown == HB_AMIXDOWN_6CH ) && ( audio->config.in.codec == HB_ACODEC_AC3) )
    {
        SInt32 channelMap[6] = { 2, 1, 3, 4, 5, 0 };
        AudioConverterSetProperty( pv->converter, kAudioConverterChannelMap,
                                   sizeof( channelMap ), channelMap );
    }

    // set encoder quality to maximum
    tmp = kAudioConverterQuality_Max;
    AudioConverterSetProperty( pv->converter, kAudioConverterCodecQuality,
                               sizeof( tmp ), &tmp );

    // set encoder bitrate control mode to constrained variable
    tmp = kAudioCodecBitRateControlMode_VariableConstrained;
    AudioConverterSetProperty( pv->converter, kAudioCodecPropertyBitRateControlMode,
                               sizeof( tmp ), &tmp );

    // get available bitrates
    AudioValueRange *bitrates;
    ssize_t bitrateCounts;
    err = AudioConverterGetPropertyInfo( pv->converter, kAudioConverterApplicableEncodeBitRates,
                                         &tmpsiz, NULL);
    bitrates = malloc( tmpsiz );
    err = AudioConverterGetProperty( pv->converter, kAudioConverterApplicableEncodeBitRates,
                                     &tmpsiz, bitrates);
    bitrateCounts = tmpsiz / sizeof( AudioValueRange );

    // set bitrate
    tmp = audio->config.out.bitrate * 1000;
    if( tmp < bitrates[0].mMinimum )
        tmp = bitrates[0].mMinimum;
    if( tmp > bitrates[bitrateCounts-1].mMinimum )
        tmp = bitrates[bitrateCounts-1].mMinimum;
    free( bitrates );
    if( tmp != audio->config.out.bitrate * 1000 )
        hb_log( "encca_aac: sanitizing track %d audio bitrate %d to %"PRIu32"", 
                audio->config.out.track, audio->config.out.bitrate, tmp/1000 );
    AudioConverterSetProperty( pv->converter, kAudioConverterEncodeBitRate,
                               sizeof( tmp ), &tmp );

    // get real input
    tmpsiz = sizeof( input );
    AudioConverterGetProperty( pv->converter,
                               kAudioConverterCurrentInputStreamDescription,
                               &tmpsiz, &input );
    // get real output
    tmpsiz = sizeof( output );
    AudioConverterGetProperty( pv->converter,
                               kAudioConverterCurrentOutputStreamDescription,
                               &tmpsiz, &output );

    // set sizes
    pv->isamplesiz  = input.mBytesPerPacket;
    pv->isamples    = output.mFramesPerPacket;
    pv->osamplerate = output.mSampleRate;

    // get maximum output size
    AudioConverterGetProperty( pv->converter,
                               kAudioConverterPropertyMaximumOutputPacketSize,
                               &tmpsiz, &tmp );
    pv->omaxpacket = tmp;

    // get magic cookie (elementary stream descriptor)
    tmp = HB_CONFIG_MAX_SIZE;
    AudioConverterGetProperty( pv->converter,
                               kAudioConverterCompressionMagicCookie,
                               &tmp, w->config->aac.bytes );
    // CoreAudio returns a complete ESDS, but we only need
    // the DecoderSpecific info.
    UInt8* buffer = NULL;
    ReadESDSDescExt(w->config->aac.bytes, &buffer, &tmpsiz, 0);
    w->config->aac.length = tmpsiz;
    memmove( w->config->aac.bytes, buffer,
             w->config->aac.length );

    pv->list = hb_list_init();
    pv->buf = NULL;

    return 0;
}
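
The channelMap used for the 6-channel AC3 case relies on the documented kAudioConverterChannelMap semantics: the SInt32 array is indexed by output channel, and each element names the input channel routed to it, so { 2, 1, 3, 4, 5, 0 } reorders the decoder's channel order into the one the AAC encoder expects. A minimal 3-channel illustration (with `converter` standing for any open AudioConverterRef):

SInt32 map[3] = { 2, 0, 1 };    // out0 <- in2, out1 <- in0, out2 <- in1
AudioConverterSetProperty(converter, kAudioConverterChannelMap,
                          sizeof(map), map);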
Пример #24
0
OSStatus AudioConverterNewSpecific(const AudioStreamBasicDescription* inSourceFormat, const AudioStreamBasicDescription* inDestinationFormat, UInt32 inNumberClassDescriptions, const AudioClassDescription* inClassDescriptions, AudioConverterRef* outAudioConverter)
{
	// FIXME: the class descriptions are ignored; fall back to a generic converter
	return AudioConverterNew(inSourceFormat, inDestinationFormat, outAudioConverter);
}
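
The ignored class descriptions are normally how a caller pins a specific codec implementation. A hedged usage sketch on Apple platforms, requesting the software AAC encoder (`in` and `out` are assumed to be ASBDs prepared as in the other examples here):

AudioClassDescription aacSoftware = {
    kAudioEncoderComponentType,            // we want an encoder...
    kAudioFormatMPEG4AAC,                  // ...for AAC...
    kAppleSoftwareAudioCodecManufacturer   // ...implemented in software
};
AudioConverterRef conv = NULL;
OSStatus st = AudioConverterNewSpecific(&in, &out, 1, &aacSoftware, &conv);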
Пример #25
0
//-----------------------------------------------------------------------------
bool AudioFile::Load()
{
#if MAC
	AudioFileID mAudioFileID;
    AudioStreamBasicDescription fileDescription, outputFormat;
    SInt64 dataSize64;
    UInt32 dataSize;
	
	OSStatus err;
	UInt32 size;
	
    // Open the file
	FSRef	ref;
	Boolean	isDirectory=false;
	FSPathMakeRef((const UInt8*)GetFilePath(), &ref, &isDirectory);
	
	err = AudioFileOpen(&ref, fsRdPerm, 0, &mAudioFileID);
    if (err) {
        //NSLog(@"AudioFileOpen failed");
        return false;
    }
	
    // Read the opened file's basic format info into fileDescription
    size = sizeof(AudioStreamBasicDescription);
	err = AudioFileGetProperty(mAudioFileID, kAudioFilePropertyDataFormat, 
							   &size, &fileDescription);
    if (err) {
        //NSLog(@"AudioFileGetProperty failed");
        AudioFileClose(mAudioFileID);
        return false;
    }
	
    // Read the byte count of the file's audio data section into dataSize
    size = sizeof(SInt64);
	err = AudioFileGetProperty(mAudioFileID, kAudioFilePropertyAudioDataByteCount, 
							   &size, &dataSize64);
    if (err) {
        //NSLog(@"AudioFileGetProperty failed");
        AudioFileClose(mAudioFileID);
        return false;
    }
	dataSize = static_cast<UInt32>(dataSize64);
	
	AudioFileTypeID	fileTypeID;
	size = sizeof( AudioFileTypeID );
	err = AudioFileGetProperty(mAudioFileID, kAudioFilePropertyFileFormat, &size, &fileTypeID);
	if (err) {
        //NSLog(@"AudioFileGetProperty failed");
        AudioFileClose(mAudioFileID);
        return false;
    }
	
	// Initialize the instrument info
	mInstData.basekey	= 60;
	mInstData.lowkey	= 0;
	mInstData.highkey	= 127;
	mInstData.loop		= 0;
	
	// Get the loop points
	Float64		st_point=0.0,end_point=0.0;
	if ( fileTypeID == kAudioFileAIFFType || fileTypeID == kAudioFileAIFCType ) {
		// Get the INST chunk
		AudioFileGetUserDataSize(mAudioFileID, 'INST', 0, &size);
		if ( size > 4 ) {
			UInt8	*instChunk = new UInt8[size];
			AudioFileGetUserData(mAudioFileID, 'INST', 0, &size, instChunk);
			
			// Get the MIDI note info
			mInstData.basekey = instChunk[0];
			mInstData.lowkey = instChunk[2];
			mInstData.highkey = instChunk[3];
			
			if ( instChunk[9] > 0 ) {	// check the loop flag
				// get the markers
				UInt32	writable;
				err = AudioFileGetPropertyInfo(mAudioFileID, kAudioFilePropertyMarkerList,
											   &size, &writable);
				if (err) {
					//NSLog(@"AudioFileGetPropertyInfo failed");
					delete [] instChunk;
					AudioFileClose(mAudioFileID);
					return false;
				}
				UInt8	*markersBuffer = new UInt8[size];
				AudioFileMarkerList	*markers = reinterpret_cast<AudioFileMarkerList*>(markersBuffer);
				
				err = AudioFileGetProperty(mAudioFileID, kAudioFilePropertyMarkerList, 
										   &size, markers);
				if (err) {
					//NSLog(@"AudioFileGetProperty failed");
					delete [] markersBuffer;
					delete [] instChunk;
					AudioFileClose(mAudioFileID);
					return false;
				}
				
				// set the loop points
				for (unsigned int i=0; i<markers->mNumberMarkers; i++) {
					if (markers->mMarkers[i].mMarkerID == instChunk[11] ) {
						st_point = markers->mMarkers[i].mFramePosition;
					}
					else if (markers->mMarkers[i].mMarkerID == instChunk[13] ) {
						end_point = markers->mMarkers[i].mFramePosition;
					}
					CFRelease(markers->mMarkers[i].mName);
				}
				if ( st_point < end_point ) {
					mInstData.loop = 1;
				}
				delete [] markersBuffer;
			}
			delete [] instChunk;
		}
		
	}
	else if ( fileTypeID == kAudioFileWAVEType ) {
		// Get the smpl chunk
		AudioFileGetUserDataSize( mAudioFileID, 'smpl', 0, &size );
		if ( size >= sizeof(WAV_smpl) ) {
			UInt8	*smplChunk = new UInt8[size];
			AudioFileGetUserData( mAudioFileID, 'smpl', 0, &size, smplChunk );
			WAV_smpl	*smpl = (WAV_smpl *)smplChunk;
			
			smpl->loops = EndianU32_LtoN( smpl->loops );
			
			if ( smpl->loops > 0 ) {
				mInstData.loop = true;
				mInstData.basekey = EndianU32_LtoN( smpl->note );
				st_point = EndianU32_LtoN( smpl->start );
				end_point = EndianU32_LtoN( smpl->end ) + 1;	// SoundForge et al. treat the end point as inclusive
				//end_point = EndianU32_LtoN( smpl->end );	// in Peak, oddly, the same as AIFF
			}
			else {
				mInstData.basekey = EndianU32_LtoN( smpl->note );
			}
			delete [] smplChunk;
		}
	}
	
	// Cap the data size
	SInt64	dataSamples = dataSize / fileDescription.mBytesPerFrame;
	if ( dataSamples > MAXIMUM_SAMPLES ) {
		dataSize = MAXIMUM_SAMPLES * fileDescription.mBytesPerFrame;
	}
	if ( st_point > MAXIMUM_SAMPLES ) {
		st_point = MAXIMUM_SAMPLES;
	}
	if ( end_point > MAXIMUM_SAMPLES ) {
		end_point = MAXIMUM_SAMPLES;
	}
	
    // Allocate temporary memory for the waveform read
    char *fileBuffer;
	unsigned int	fileBufferSize;
	if (mInstData.loop) {
		fileBufferSize = dataSize+EXPAND_BUFFER*fileDescription.mBytesPerFrame;
	}
	else {
		fileBufferSize = dataSize;
	}
	fileBuffer = new char[fileBufferSize];
	memset(fileBuffer, 0, fileBufferSize);
	
	// Read the waveform data from the file
	err = AudioFileReadBytes(mAudioFileID, false, 0, &dataSize, fileBuffer);
    if (err) {
        //NSLog(@"AudioFileReadBytes failed");
        AudioFileClose(mAudioFileID);
        delete [] fileBuffer;
        return false;
    }
    AudioFileClose(mAudioFileID);
	
    // Unroll the loop region
    Float64	adjustment = 1.0;
    outputFormat=fileDescription;
	if (mInstData.loop) {
		UInt32	plusalpha=0, framestocopy;
		while (plusalpha < EXPAND_BUFFER) {
			framestocopy = (end_point-st_point) > (EXPAND_BUFFER-plusalpha) ?
			               (EXPAND_BUFFER-plusalpha) : (end_point-st_point);
			memcpy(fileBuffer+((int)end_point+plusalpha)*fileDescription.mBytesPerFrame,
				   fileBuffer+(int)st_point*fileDescription.mBytesPerFrame,
				   framestocopy*fileDescription.mBytesPerFrame);
			plusalpha += framestocopy;
		}
		dataSize += plusalpha*fileDescription.mBytesPerFrame;
		
		// Snap to a 16-sample boundary
		adjustment = ( (long long)((end_point-st_point)/16) ) / ((end_point-st_point)/16.0);
		st_point *= adjustment;
		end_point *= adjustment;
	}
	outputFormat.mFormatID = kAudioFormatLinearPCM;
    outputFormat.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagsNativeEndian;
	outputFormat.mChannelsPerFrame = 1;
	outputFormat.mBytesPerFrame = sizeof(float);
	outputFormat.mBitsPerChannel = 32;
	outputFormat.mBytesPerPacket = outputFormat.mBytesPerFrame;
	
    // Prepare a converter for the byte-order/format conversion
    AudioConverterRef converter;
	err = AudioConverterNew(&fileDescription, &outputFormat, &converter);
    if (err) {
        //NSLog(@"AudioConverterNew failed");
        delete [] fileBuffer;
        return false;
    }
	
	// Set sample-rate conversion quality to maximum
//	if (fileDescription.mSampleRate != outputFormat.mSampleRate) {
//		size = sizeof(UInt32);
//		UInt32	setProp = kAudioConverterQuality_Max;
//		AudioConverterSetProperty(converter, kAudioConverterSampleRateConverterQuality,
//								  size, &setProp);
//        
//        size = sizeof(UInt32);
//		setProp = kAudioConverterSampleRateConverterComplexity_Mastering;
//		AudioConverterSetProperty(converter, kAudioConverterSampleRateConverterComplexity,
//								  size, &setProp);
//        
//	}
	
    // Get a buffer size sufficient for the output
	UInt32	outputSize = dataSize;
	size = sizeof(UInt32);
	err = AudioConverterGetProperty(converter, kAudioConverterPropertyCalculateOutputBufferSize, 
									&size, &outputSize);
	if (err) {
		//NSLog(@"AudioConverterGetProperty failed");
		delete [] fileBuffer;
		AudioConverterDispose(converter);
        return false;
	}
    UInt32 monoSamples = outputSize/sizeof(float);
    
    // Byte-order conversion
    float *monoData = new float[monoSamples];
	AudioConverterConvertBuffer(converter, dataSize, fileBuffer,
								&outputSize, monoData);
    if(outputSize == 0) {
        //NSLog(@"AudioConverterConvertBuffer failed");
        delete [] fileBuffer;
        AudioConverterDispose(converter);
        return false;
    }
    
    // If the loop length is not a multiple of 16, convert the sample rate
    Float64 inputSampleRate = fileDescription.mSampleRate;
    Float64 outputSampleRate = fileDescription.mSampleRate * adjustment;
    int	outSamples = monoSamples;
    if ( outputSampleRate == inputSampleRate ) {
        m_pAudioData = new short[monoSamples];
        for (int i=0; i<monoSamples; i++) {
            m_pAudioData[i] = static_cast<short>(monoData[i] * 32768);
        }
    }
    else {
        outSamples = static_cast<int>(monoSamples / (inputSampleRate / outputSampleRate));
        m_pAudioData = new short[outSamples];
        resampling(monoData, monoSamples, inputSampleRate,
                   m_pAudioData, &outSamples, outputSampleRate);
    }
    
    // Clean up
    delete [] monoData;
    delete [] fileBuffer;
    AudioConverterDispose(converter);
	
	// Set the Inst data
	if ( st_point > MAXIMUM_SAMPLES ) {
		mInstData.lp = MAXIMUM_SAMPLES;
	}
	else {
		mInstData.lp			= st_point;
	}
	if ( end_point > MAXIMUM_SAMPLES ) {
		mInstData.lp_end = MAXIMUM_SAMPLES;
	}
	else {
		mInstData.lp_end		= end_point;
	}
	mInstData.srcSamplerate	= outputSampleRate;
    mLoadedSamples			= outSamples;
	
	mIsLoaded = true;
	
	return true;
#else
	// Windows audio file loading path

	// Open the file
	HMMIO	hmio = NULL;
	MMRESULT	err;
	DWORD		size;

	hmio = mmioOpen( mPath, NULL, MMIO_READ );
	if ( !hmio ) {
		return false;
	}
	
	// Find the RIFF chunk
	MMCKINFO	riffChunkInfo;
	riffChunkInfo.fccType = mmioFOURCC('W', 'A', 'V', 'E');
	err = mmioDescend( hmio, &riffChunkInfo, NULL, MMIO_FINDRIFF );
	if ( err != MMSYSERR_NOERROR ) {
		mmioClose( hmio, 0 );
		return false;
	}
	if ( (riffChunkInfo.ckid != FOURCC_RIFF) || (riffChunkInfo.fccType != mmioFOURCC('W', 'A', 'V', 'E') ) ) {
		mmioClose( hmio, 0 );
		return false;
	}

	// Find the format chunk
	MMCKINFO	formatChunkInfo;
	formatChunkInfo.ckid = mmioFOURCC('f', 'm', 't', ' ');
	err = mmioDescend( hmio, &formatChunkInfo, &riffChunkInfo, MMIO_FINDCHUNK );
	if ( err != MMSYSERR_NOERROR ) {
		mmioClose( hmio, 0 );
		return false;
	}
	if ( formatChunkInfo.cksize < sizeof(PCMWAVEFORMAT) ) {
		mmioClose( hmio, 0 );
		return false;
	}

	// Get the format info
	WAVEFORMATEX	pcmWaveFormat;
	DWORD			fmsize = (formatChunkInfo.cksize > sizeof(WAVEFORMATEX)) ? sizeof(WAVEFORMATEX):formatChunkInfo.cksize;
	size = mmioRead( hmio, (HPSTR)&pcmWaveFormat, fmsize );
	if ( size != fmsize ) {
		mmioClose( hmio, 0 );
		return false;
	}
	if ( pcmWaveFormat.wFormatTag != WAVE_FORMAT_PCM ) {
		mmioClose( hmio, 0 );
		return false;
	}
	mmioAscend(hmio, &formatChunkInfo, 0);

	// Initialize the instrument info
	mInstData.basekey	= 60;
	mInstData.lowkey	= 0;
	mInstData.highkey	= 127;
	mInstData.loop		= 0;

	// Find the smpl chunk
	MMCKINFO	smplChunkInfo;
	smplChunkInfo.ckid = mmioFOURCC('s', 'm', 'p', 'l');
	err = mmioDescend( hmio, &smplChunkInfo, &riffChunkInfo, MMIO_FINDCHUNK );
	if ( err != MMSYSERR_NOERROR ) {
		smplChunkInfo.cksize = 0;
	}
	double	st_point=0.0;
	double	end_point=0.0;
	if ( smplChunkInfo.cksize >= sizeof(WAV_smpl) ) {
		// Get the loop points
		unsigned char	*smplChunk = new unsigned char[smplChunkInfo.cksize];
		size = mmioRead(hmio,(HPSTR)smplChunk, smplChunkInfo.cksize);
		WAV_smpl	*smpl = (WAV_smpl *)smplChunk;

		if ( smpl->loops > 0 ) {
			mInstData.loop = 1;
			mInstData.basekey = smpl->note;
			st_point = smpl->start;
			end_point = smpl->end + 1;	// SoundForge et al. treat the end point as inclusive
		}
		else {
			mInstData.basekey = smpl->note;
		}
		delete [] smplChunk;
	}
	mmioAscend(hmio, &formatChunkInfo, 0);

	// Find the data chunk
	MMCKINFO dataChunkInfo;
	dataChunkInfo.ckid = mmioFOURCC('d', 'a', 't', 'a');
	err = mmioDescend( hmio, &dataChunkInfo, &riffChunkInfo, MMIO_FINDCHUNK );
	if( err != MMSYSERR_NOERROR ) {
		mmioClose( hmio, 0 );
		return false;
	}

	// Allocate temporary memory for the waveform read
	unsigned int	dataSize = dataChunkInfo.cksize;
	int				bytesPerSample = pcmWaveFormat.nBlockAlign;
	char			*fileBuffer;
	unsigned int	fileBufferSize;

	// Cap the data size
	int	dataSamples = dataSize / pcmWaveFormat.nBlockAlign;
	if ( dataSamples > MAXIMUM_SAMPLES ) {
		dataSize = MAXIMUM_SAMPLES * pcmWaveFormat.nBlockAlign;
	}
	if ( st_point > MAXIMUM_SAMPLES ) {
		st_point = MAXIMUM_SAMPLES;
	}
	if ( end_point > MAXIMUM_SAMPLES ) {
		end_point = MAXIMUM_SAMPLES;
	}
	
	
	if (mInstData.loop) {
		fileBufferSize = dataSize+EXPAND_BUFFER*bytesPerSample;
	}
	else {
		fileBufferSize = dataSize;
	}
	fileBuffer = new char[fileBufferSize];
	memset(fileBuffer, 0, fileBufferSize);
	
	// Read the waveform data from the file
	size = mmioRead(hmio, (HPSTR)fileBuffer, dataSize);
	if ( size != dataSize ) {
		mmioClose( hmio, 0 );
		return false;
	}
	mmioClose(hmio,0);

	// Unroll the loop region
	double	inputSampleRate = pcmWaveFormat.nSamplesPerSec;
	double	outputSampleRate = inputSampleRate;
	if (mInstData.loop) {
		unsigned int	plusalpha=0;
		double			framestocopy;
		while (plusalpha < EXPAND_BUFFER) {
			framestocopy = (end_point-st_point) > (EXPAND_BUFFER-plusalpha) ?
			               (EXPAND_BUFFER-plusalpha) : (end_point-st_point);
			memcpy(fileBuffer+((int)end_point+plusalpha)*bytesPerSample,
				   fileBuffer+(int)st_point*bytesPerSample,
				   static_cast<size_t>(framestocopy*bytesPerSample));
			plusalpha += static_cast<unsigned int>(framestocopy);
		}
		dataSize += plusalpha*bytesPerSample;
		
		// Snap to a 16-sample boundary
		double	adjustment = ( (long long)((end_point-st_point)/16) ) / ((end_point-st_point)/16.0);
		outputSampleRate *= adjustment;
		st_point *= adjustment;
		end_point *= adjustment;
	}

	// Convert to float mono data first
	int	bytesPerChannel = bytesPerSample / pcmWaveFormat.nChannels;
	unsigned int	inputPtr = 0;
	unsigned int	outputPtr = 0;
	int				monoSamples = dataSize / bytesPerSample;
	float	range = static_cast<float>((1<<(bytesPerChannel*8-1)) * pcmWaveFormat.nChannels);
	float	*monoData = new float[monoSamples];
	while (inputPtr < dataSize) {
		int	frameSum = 0;
		for (int ch=0; ch<pcmWaveFormat.nChannels; ch++) {
			for (int i=0; i<bytesPerChannel; i++) {
				if (i<bytesPerChannel-1) {
					frameSum += (unsigned char)fileBuffer[inputPtr] << (8*i);
				}
				else {
					frameSum += fileBuffer[inputPtr] << (8*i);
				}
				inputPtr++;
			}
		}
		monoData[outputPtr] = frameSum / range;
		outputPtr++;
	}

	// If the loop length is not a multiple of 16, convert the sample rate
	int	outSamples = monoSamples;
	if ( outputSampleRate == inputSampleRate ) {
		m_pAudioData = new short[monoSamples];
		for (int i=0; i<monoSamples; i++) {
			m_pAudioData[i] = static_cast<short>(monoData[i] * 32768);
		}
	}
	else {
		outSamples = static_cast<int>(monoSamples / (inputSampleRate / outputSampleRate));
		m_pAudioData = new short[outSamples];
		resampling(monoData, monoSamples, inputSampleRate,
				   m_pAudioData, &outSamples, outputSampleRate);
	}

	// Clean up
	delete [] fileBuffer;
	delete [] monoData;

	// Set the Inst data
	mInstData.lp			= static_cast<int>(st_point);
	mInstData.lp_end		= static_cast<int>(end_point);
	mInstData.srcSamplerate	= outputSampleRate;
    mLoadedSamples			= outSamples;

	mIsLoaded = true;

	return true;
#endif
}
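
The resampling() helper invoked on both platform paths is not shown in this example. A minimal stand-in with the same call signature, assuming plain linear interpolation and 16-bit output, could look like this:

// Hypothetical implementation; the original may use a better interpolator.
static void resampling(const float *in, int inSamples, double inRate,
                       short *out, int *ioOutSamples, double outRate)
{
    int    n    = *ioOutSamples;             // capacity, computed by the caller
    double step = inRate / outRate;          // input frames per output frame
    for (int i = 0; i < n; i++) {
        double pos  = i * step;
        int    idx  = (int)pos;
        double frac = pos - idx;
        float  a = in[idx];
        float  b = (idx + 1 < inSamples) ? in[idx + 1] : a;
        out[i] = (short)((a + (b - a) * frac) * 32767.0f);
    }
    *ioOutSamples = n;
}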
Пример #26
0
// _______________________________________________________________________________________
//
void	CAAudioFile::SetClientFormat(const CAStreamBasicDescription &dataFormat, const CAAudioChannelLayout *layout)
{
	LOG_FUNCTION("CAAudioFile::SetClientFormat", "%p", this);
	XThrowIf(!dataFormat.IsPCM(), kExtAudioFileError_NonPCMClientFormat, "non-PCM client format on audio file");

	bool dataFormatChanging = (mClientDataFormat.mFormatID == 0 || mClientDataFormat != dataFormat);

	if (dataFormatChanging) {
		CloseConverter();
		if (mWriteBufferList) {
			delete mWriteBufferList;
			mWriteBufferList = NULL;
		}
		mClientDataFormat = dataFormat;
	}

	if (layout && layout->IsValid()) {
		XThrowIf(layout->NumberChannels() != mClientDataFormat.NumberChannels(), kExtAudioFileError_InvalidChannelMap, "inappropriate channel map");
		mClientChannelLayout = *layout;
	}

	bool differentLayouts;
	if (mClientChannelLayout.IsValid()) {
		if (mFileChannelLayout.IsValid()) {
			differentLayouts = mClientChannelLayout.Tag() != mFileChannelLayout.Tag();
#if VERBOSE_CHANNELMAP
			printf("two valid layouts, %s\n", differentLayouts ? "different" : "same");
#endif
		} else {
			differentLayouts = false;
#if VERBOSE_CHANNELMAP
			printf("valid client layout, unknown file layout\n");
#endif
		}
	} else {
		differentLayouts = false;
#if VERBOSE_CHANNELMAP
		if (mFileChannelLayout.IsValid())
			printf("valid file layout, unknown client layout\n");
		else
			printf("two invalid layouts\n");
#endif
	}

	if (mClientDataFormat != mFileDataFormat || differentLayouts) {
		// We need an AudioConverter.
		if (mMode == kReading) {
			// file -> client (decode)
//mFileDataFormat.PrintFormat(  stdout, "", "File:   ");
//mClientDataFormat.PrintFormat(stdout, "", "Client: ");

			if (mConverter == NULL)
				XThrowIfError(AudioConverterNew(&mFileDataFormat, &mClientDataFormat, &mConverter),
				"create audio converter");

#if VERBOSE_CONVERTER
			printf("CAAudioFile %p -- created converter\n", this);
			CAShow(mConverter);
#endif
			// set the magic cookie, if any (for decode)
			if (mMagicCookie)
				SetConverterProperty(kAudioConverterDecompressionMagicCookie, mMagicCookieSize, mMagicCookie, mFileDataFormat.IsPCM());
					// we get cookies from some AIFF's but the converter barfs on them,
					// so we set canFail to true for PCM

			SetConverterChannelLayout(false, mFileChannelLayout);
			SetConverterChannelLayout(true, mClientChannelLayout);

			// propagate leading/trailing frame counts
			if (mFileDataFormat.mBitsPerChannel == 0) {
				UInt32 propertySize;
				OSStatus err;
				AudioFilePacketTableInfo pti;
				propertySize = sizeof(pti);
				err = AudioFileGetProperty(mAudioFile, kAudioFilePropertyPacketTableInfo, &propertySize, &pti);
				if (err == noErr && (pti.mPrimingFrames > 0 || pti.mRemainderFrames > 0)) {
					AudioConverterPrimeInfo primeInfo;
					primeInfo.leadingFrames = pti.mPrimingFrames;
					primeInfo.trailingFrames = pti.mRemainderFrames;
					/* ignore any error. better to play it at all than not. */
					/*err = */AudioConverterSetProperty(mConverter, kAudioConverterPrimeInfo, sizeof(primeInfo), &primeInfo);
					//XThrowIfError(err, "couldn't set prime info on converter");
				}
			}
		} else if (mMode == kPreparingToCreate || mMode == kPreparingToWrite) {
			// client -> file (encode)
			if (mConverter == NULL)
				XThrowIfError(AudioConverterNew(&mClientDataFormat, &mFileDataFormat, &mConverter), "create audio converter");
			mWriteBufferList = CABufferList::New("", mClientDataFormat);
			SetConverterChannelLayout(false, mClientChannelLayout);
			SetConverterChannelLayout(true, mFileChannelLayout);
			if (mMode == kPreparingToWrite)
				FileFormatChanged();
		} else
			XThrowIfError(kExtAudioFileError_InvalidOperationOrder, "audio file format not yet known");
	}
	UpdateClientMaxPacketSize();
}
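
The prime-info propagation above exists because a compressed stream decodes to priming + valid + remainder frames, and only the middle span is real audio. A small sketch of the bookkeeping, using the same AudioFilePacketTableInfo fields:

// Frames worth keeping after the converter trims both ends; when the packet
// table is sane this equals pti->mNumberValidFrames.
static SInt64 playable_frames(const AudioFilePacketTableInfo *pti,
                              SInt64 totalPackets, UInt32 framesPerPacket)
{
    SInt64 decoded = totalPackets * (SInt64)framesPerPacket;
    return decoded - pti->mPrimingFrames - pti->mRemainderFrames;
}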
Пример #27
0
bool FCoreAudioSoundSource::AttachToAUGraph()
{
	AudioChannel = FindFreeAudioChannel();

	OSStatus ErrorStatus = noErr;
	AUNode DestNode = -1;
	int32 DestInputNumber = 0;
	AudioStreamBasicDescription* StreamFormat = NULL;
	
	if( Buffer->NumChannels < 3 )
	{
		ErrorStatus = AudioConverterNew( &Buffer->PCMFormat, &AudioDevice->Mixer3DFormat, &CoreAudioConverter );
		DestNode = AudioDevice->GetMixer3DNode();
		MixerInputNumber = DestInputNumber = AudioDevice->GetFreeMixer3DInput();
		
		uint32 SpatialSetting = ( Buffer->NumChannels == 1 ) ? kSpatializationAlgorithm_SoundField : kSpatializationAlgorithm_StereoPassThrough;
		AudioUnitSetProperty( AudioDevice->GetMixer3DUnit(), kAudioUnitProperty_SpatializationAlgorithm, kAudioUnitScope_Input, MixerInputNumber, &SpatialSetting, sizeof( SpatialSetting ) );
		AudioUnitSetParameter( AudioDevice->GetMixer3DUnit(), k3DMixerParam_Distance, kAudioUnitScope_Input, MixerInputNumber, 1.0f, 0 );

		StreamFormat = &AudioDevice->Mixer3DFormat;
	}
	else
	{
		MixerInputNumber = DestInputNumber = AudioDevice->GetFreeMatrixMixerInput();
		DestNode = AudioDevice->GetMatrixMixerNode();
		StreamFormat = &AudioDevice->MatrixMixerInputFormat;
		
		ErrorStatus = AudioConverterNew( &Buffer->PCMFormat, &AudioDevice->MatrixMixerInputFormat, &CoreAudioConverter );

		bool bIs6ChannelOGG = Buffer->NumChannels == 6 && (Buffer->DecompressionState || WaveInstance->WaveData->bDecompressedFromOgg);

		AudioDevice->SetupMatrixMixerInput( DestInputNumber, bIs6ChannelOGG );
	}

	if( ErrorStatus != noErr )
	{
		UE_LOG(LogCoreAudio, Warning, TEXT("CoreAudioConverter creation failed, error code %d"), ErrorStatus);
	}

	int FiltersNeeded = 0;
#if EQ_ENABLED
	bool bNeedEQFilter = IsEQFilterApplied();
	if( bNeedEQFilter )
	{
		++FiltersNeeded;
	}
#else
	bool bNeedEQFilter = false;
#endif

#if RADIO_ENABLED
	bool bNeedRadioFilter = Effects->bRadioAvailable && WaveInstance->bApplyRadioFilter;
	if( bNeedRadioFilter )
	{
		++FiltersNeeded;
	}
#else
	bool bNeedRadioFilter = false;
#endif

#if REVERB_ENABLED
	bool bNeedReverbFilter = bReverbApplied;
	if( bNeedReverbFilter )
	{
		++FiltersNeeded;
	}
#else
	bool bNeedReverbFilter = false;
#endif

	if( FiltersNeeded > 0 )
	{
		uint32 BusCount = FiltersNeeded + ( bNeedEQFilter ? 0 : 1 );	// one for each filter, plus one for dry voice if there's no EQ filter

		// Prepare Voice Merger
		SAFE_CA_CALL( CreateAudioUnit( kAudioUnitType_Mixer, kAudioUnitSubType_MatrixMixer, kAudioUnitManufacturer_Apple, NULL, NULL, &StreamMergerNode, &StreamMergerUnit ) );

		// Set Bus Counts
		uint32 NumBuses = BusCount;
		SAFE_CA_CALL( AudioUnitSetProperty( StreamMergerUnit, kAudioUnitProperty_ElementCount, kAudioUnitScope_Input, 0, &NumBuses, sizeof(uint32) ) );
		NumBuses = 1;
		SAFE_CA_CALL( AudioUnitSetProperty(	StreamMergerUnit, kAudioUnitProperty_ElementCount, kAudioUnitScope_Output, 0, &NumBuses, sizeof(uint32) ) );

		// Set Input Formats
		for( int32 InputIndex = 0; InputIndex < BusCount; ++InputIndex )
		{
			SAFE_CA_CALL( AudioUnitSetProperty( StreamMergerUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, InputIndex, StreamFormat, sizeof( AudioStreamBasicDescription ) ) );
		}

		// Set Output Format
		SAFE_CA_CALL( AudioUnitSetProperty( StreamMergerUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, StreamFormat, sizeof( AudioStreamBasicDescription ) ) );

		SAFE_CA_CALL( AudioUnitInitialize( StreamMergerUnit ) );

		// Set Master volume
		SAFE_CA_CALL( AudioUnitSetParameter( StreamMergerUnit, kMatrixMixerParam_Volume, kAudioUnitScope_Global, 0xFFFFFFFF, 1.0, 0 ) );

		// Enable Output
		SAFE_CA_CALL( AudioUnitSetParameter( StreamMergerUnit, kMatrixMixerParam_Enable, kAudioUnitScope_Output, 0, 1.0, 0 ) );

		// Set Output volumes
		for( int32 ChannelIndex = 0; ChannelIndex < StreamFormat->mChannelsPerFrame; ++ChannelIndex )
		{
			SAFE_CA_CALL( AudioUnitSetParameter( StreamMergerUnit, kMatrixMixerParam_Volume, kAudioUnitScope_Output, ChannelIndex, 1.0, 0 ) );
		}

		for( int32 InputIndex = 0; InputIndex < BusCount; ++InputIndex )
		{
			// Enable Input
			SAFE_CA_CALL( AudioUnitSetParameter( StreamMergerUnit, kMatrixMixerParam_Enable, kAudioUnitScope_Input, InputIndex, 1.0, 0 ) );
		}

		for( int32 ChannelIndex = 0; ChannelIndex < StreamFormat->mChannelsPerFrame; ++ChannelIndex )
		{
			for( int32 InputBusIndex = 0; InputBusIndex < BusCount; ++InputBusIndex )
			{
				int32 InputChannelIndex = InputBusIndex*StreamFormat->mChannelsPerFrame + ChannelIndex;

				// Set Input Channel Volume
				SAFE_CA_CALL( AudioUnitSetParameter( StreamMergerUnit, kMatrixMixerParam_Volume, kAudioUnitScope_Input, InputChannelIndex, 1.0, 0 ) );

				// Set crossfade volume - each input channel goes to a specific output channel; the rest of the connections are left at zero.
				SAFE_CA_CALL( AudioUnitSetParameter( StreamMergerUnit, kMatrixMixerParam_Volume, kAudioUnitScope_Global, ( InputChannelIndex << 16 ) | ChannelIndex, 1.0, 0 ) );
			}
		}

		SAFE_CA_CALL( AUGraphConnectNodeInput( AudioDevice->GetAudioUnitGraph(), StreamMergerNode, 0, DestNode, DestInputNumber ) );

		DestNode = StreamMergerNode;
		DestInputNumber = 0;

		// Prepare and initialize stream splitter
		SAFE_CA_CALL( CreateAudioUnit( kAudioUnitType_Mixer, kAudioUnitSubType_MatrixMixer, kAudioUnitManufacturer_Apple, NULL, NULL, &StreamSplitterNode, &StreamSplitterUnit ) );

		// Set bus counts
		NumBuses = 1;
		SAFE_CA_CALL( AudioUnitSetProperty( StreamSplitterUnit, kAudioUnitProperty_ElementCount, kAudioUnitScope_Input, 0, &NumBuses, sizeof(uint32) ) );
		NumBuses = BusCount;
		SAFE_CA_CALL( AudioUnitSetProperty(	StreamSplitterUnit, kAudioUnitProperty_ElementCount, kAudioUnitScope_Output, 0, &NumBuses, sizeof(uint32) ) );

		// Set Input format
		SAFE_CA_CALL( AudioUnitSetProperty( StreamSplitterUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, StreamFormat, sizeof( AudioStreamBasicDescription ) ) );

		// Set Output formats
		for( int32 OutputIndex = 0; OutputIndex < BusCount; ++OutputIndex )
		{
			SAFE_CA_CALL( AudioUnitSetProperty( StreamSplitterUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, OutputIndex, StreamFormat, sizeof( AudioStreamBasicDescription ) ) );
		}

		SAFE_CA_CALL( AudioUnitInitialize( StreamSplitterUnit ) );

		// Set Master volume
		SAFE_CA_CALL( AudioUnitSetParameter( StreamSplitterUnit, kMatrixMixerParam_Volume, kAudioUnitScope_Global, 0xFFFFFFFF, 1.0, 0 ) );

		// Enable Input
		SAFE_CA_CALL( AudioUnitSetParameter( StreamSplitterUnit, kMatrixMixerParam_Enable, kAudioUnitScope_Input, 0, 1.0, 0 ) );

		// Set Input Volumes
		for( int32 ChannelIndex = 0; ChannelIndex < StreamFormat->mChannelsPerFrame; ++ChannelIndex )
		{
			SAFE_CA_CALL( AudioUnitSetParameter( StreamSplitterUnit, kMatrixMixerParam_Volume, kAudioUnitScope_Input, ChannelIndex, 1.0, 0 ) );
		}

		for( int32 OutputIndex = 0; OutputIndex < BusCount; ++OutputIndex )
		{
			// Enable Output
			SAFE_CA_CALL( AudioUnitSetParameter( StreamSplitterUnit, kMatrixMixerParam_Enable, kAudioUnitScope_Output, OutputIndex, 1.0, 0 ) );
		}

		for( int32 ChannelIndex = 0; ChannelIndex < StreamFormat->mChannelsPerFrame; ++ChannelIndex )
		{
			for( int32 OutputBusIndex = 0; OutputBusIndex < BusCount; ++OutputBusIndex )
			{
				int32 OutputChannelIndex = OutputBusIndex*StreamFormat->mChannelsPerFrame + ChannelIndex;

				// Set Output Channel Volume
				SAFE_CA_CALL( AudioUnitSetParameter( StreamSplitterUnit, kMatrixMixerParam_Volume, kAudioUnitScope_Output, OutputChannelIndex, 1.0, 0 ) );

				// Set crossfade volume - each output channel is fed from a specific input channel; the rest of the connections are left at zero.
				SAFE_CA_CALL( AudioUnitSetParameter( StreamSplitterUnit, kMatrixMixerParam_Volume, kAudioUnitScope_Global, ( ChannelIndex << 16 ) | OutputChannelIndex, 1.0, 0 ) );
			}
		}

		// Prepare and connect appropriate filters
#if EQ_ENABLED
		if( bNeedEQFilter )
		{
			SAFE_CA_CALL( CreateAndConnectAudioUnit( kAudioUnitType_Effect, kAudioUnitSubType_AUFilter, kAudioUnitManufacturer_Apple, DestNode, DestInputNumber, StreamFormat, StreamFormat, &EQNode, &EQUnit ) );
			SAFE_CA_CALL( AUGraphConnectNodeInput( AudioDevice->GetAudioUnitGraph(), StreamSplitterNode, DestInputNumber, EQNode, 0 ) );
		}
		else
#endif
		{
			// Add direct connection between stream splitter and stream merger, for dry voice
			SAFE_CA_CALL( AUGraphConnectNodeInput( AudioDevice->GetAudioUnitGraph(), StreamSplitterNode, 0, StreamMergerNode, 0 ) );

			// Silencing dry voice (for testing)
//			for( int32 ChannelIndex = 0; ChannelIndex < StreamFormat->mChannelsPerFrame; ++ChannelIndex )
//			{
//				SAFE_CA_CALL( AudioUnitSetParameter( StreamSplitterUnit, kMatrixMixerParam_Volume, kAudioUnitScope_Output, ChannelIndex, 0.0, 0 ) );
//			}
//			SAFE_CA_CALL( AudioUnitSetParameter( StreamSplitterUnit, kMatrixMixerParam_Enable, kAudioUnitScope_Output, 0, 0.0, 0 ) );
		}
		++DestInputNumber;

#if RADIO_ENABLED
		if( bNeedRadioFilter )
		{
			SAFE_CA_CALL( CreateAndConnectAudioUnit( kAudioUnitType_Effect, 'Rdio', 'Epic', DestNode, DestInputNumber, StreamFormat, StreamFormat, &RadioNode, &RadioUnit ) );
			SAFE_CA_CALL( AUGraphConnectNodeInput( AudioDevice->GetAudioUnitGraph(), StreamSplitterNode, DestInputNumber, RadioNode, 0 ) );
			++DestInputNumber;
		}
#endif
#if REVERB_ENABLED
		if( bNeedReverbFilter )
		{
			SAFE_CA_CALL( CreateAndConnectAudioUnit( kAudioUnitType_Effect, kAudioUnitSubType_MatrixReverb, kAudioUnitManufacturer_Apple, DestNode, DestInputNumber, StreamFormat, StreamFormat, &ReverbNode, &ReverbUnit ) );
			SAFE_CA_CALL( AUGraphConnectNodeInput( AudioDevice->GetAudioUnitGraph(), StreamSplitterNode, DestInputNumber, ReverbNode, 0 ) );
			++DestInputNumber;
		}
#endif

		DestNode = StreamSplitterNode;
		DestInputNumber = 0;
	}

	SAFE_CA_CALL( CreateAndConnectAudioUnit( kAudioUnitType_FormatConverter, kAudioUnitSubType_AUConverter, kAudioUnitManufacturer_Apple, DestNode, DestInputNumber, StreamFormat, StreamFormat, &SourceNode, &SourceUnit ) );

	if( ErrorStatus == noErr )
	{
		AURenderCallbackStruct Input;
		Input.inputProc = &CoreAudioRenderCallback;
		Input.inputProcRefCon = this;
		SAFE_CA_CALL( AudioUnitSetProperty( SourceUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &Input, sizeof( Input ) ) );

		SAFE_CA_CALL( AUGraphUpdate( AudioDevice->GetAudioUnitGraph(), NULL ) );

		GAudioChannels[AudioChannel] = this;
	}
	return ErrorStatus == noErr;
}
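
The ( InputChannelIndex << 16 ) | ChannelIndex element IDs above follow the MatrixMixer convention: in the global scope, a volume parameter's element packs a crosspoint as (input channel << 16) | output channel. A helper that spells out just that addressing (names hypothetical):

static OSStatus set_crosspoint_gain(AudioUnit matrixMixer,
                                    UInt32 inCh, UInt32 outCh, Float32 gain)
{
    // Global-scope element = (input channel << 16) | output channel
    return AudioUnitSetParameter(matrixMixer, kMatrixMixerParam_Volume,
                                 kAudioUnitScope_Global,
                                 (inCh << 16) | outCh, gain, 0);
}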
Пример #28
0
static ALCenum ca_open_capture(ALCdevice *device, const ALCchar *deviceName)
{
    AudioStreamBasicDescription requestedFormat;  // The application requested format
    AudioStreamBasicDescription hardwareFormat;   // The hardware format
    AudioStreamBasicDescription outputFormat;     // The AudioUnit output format
    AURenderCallbackStruct input;
    ComponentDescription desc;
    AudioDeviceID inputDevice;
    UInt32 outputFrameCount;
    UInt32 propertySize;
    UInt32 enableIO;
    Component comp;
    ca_data *data;
    OSStatus err;

    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_HALOutput;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;

    // Search for component with given description
    comp = FindNextComponent(NULL, &desc);
    if(comp == NULL)
    {
        ERR("FindNextComponent failed\n");
        return ALC_INVALID_VALUE;
    }

    data = calloc(1, sizeof(*data));
    device->ExtraData = data;

    // Open the component
    err = OpenAComponent(comp, &data->audioUnit);
    if(err != noErr)
    {
        ERR("OpenAComponent failed\n");
        goto error;
    }

    // Turn off AudioUnit output
    enableIO = 0;
    err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, &enableIO, sizeof(ALuint));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        goto error;
    }

    // Turn on AudioUnit input
    enableIO = 1;
    err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &enableIO, sizeof(ALuint));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        goto error;
    }

    // Get the default input device
    propertySize = sizeof(AudioDeviceID);
    err = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice, &propertySize, &inputDevice);
    if(err != noErr)
    {
        ERR("AudioHardwareGetProperty failed\n");
        goto error;
    }

    if(inputDevice == kAudioDeviceUnknown)
    {
        ERR("No input device found\n");
        goto error;
    }

    // Track the input device
    err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, 0, &inputDevice, sizeof(AudioDeviceID));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        goto error;
    }

    // set capture callback
    input.inputProc = ca_capture_callback;
    input.inputProcRefCon = device;

    err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &input, sizeof(AURenderCallbackStruct));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        goto error;
    }

    // Initialize the device
    err = AudioUnitInitialize(data->audioUnit);
    if(err != noErr)
    {
        ERR("AudioUnitInitialize failed\n");
        goto error;
    }

    // Get the hardware format
    propertySize = sizeof(AudioStreamBasicDescription);
    err = AudioUnitGetProperty(data->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 1, &hardwareFormat, &propertySize);
    if(err != noErr || propertySize != sizeof(AudioStreamBasicDescription))
    {
        ERR("AudioUnitGetProperty failed\n");
        goto error;
    }

    // Set up the requested format description
    switch(device->FmtType)
    {
        case DevFmtUByte:
            requestedFormat.mBitsPerChannel = 8;
            requestedFormat.mFormatFlags = kAudioFormatFlagIsPacked;
            break;
        case DevFmtShort:
            requestedFormat.mBitsPerChannel = 16;
            requestedFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
            break;
        case DevFmtInt:
            requestedFormat.mBitsPerChannel = 32;
            requestedFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
            break;
        case DevFmtFloat:
            requestedFormat.mBitsPerChannel = 32;
            requestedFormat.mFormatFlags = kAudioFormatFlagIsPacked;
            break;
        case DevFmtByte:
        case DevFmtUShort:
        case DevFmtUInt:
            ERR("%s samples not supported\n", DevFmtTypeString(device->FmtType));
            goto error;
    }

    switch(device->FmtChans)
    {
        case DevFmtMono:
            requestedFormat.mChannelsPerFrame = 1;
            break;
        case DevFmtStereo:
            requestedFormat.mChannelsPerFrame = 2;
            break;

        case DevFmtQuad:
        case DevFmtX51:
        case DevFmtX51Side:
        case DevFmtX61:
        case DevFmtX71:
            ERR("%s not supported\n", DevFmtChannelsString(device->FmtChans));
            goto error;
    }

    requestedFormat.mBytesPerFrame = requestedFormat.mChannelsPerFrame * requestedFormat.mBitsPerChannel / 8;
    requestedFormat.mBytesPerPacket = requestedFormat.mBytesPerFrame;
    requestedFormat.mSampleRate = device->Frequency;
    requestedFormat.mFormatID = kAudioFormatLinearPCM;
    requestedFormat.mReserved = 0;
    requestedFormat.mFramesPerPacket = 1;

    // save requested format description for later use
    data->format = requestedFormat;
    data->frameSize = FrameSizeFromDevFmt(device->FmtChans, device->FmtType);

    // Use intermediate format for sample rate conversion (outputFormat)
    // Set sample rate to the same as hardware for resampling later
    outputFormat = requestedFormat;
    outputFormat.mSampleRate = hardwareFormat.mSampleRate;

    // Determine sample rate ratio for resampling
    data->sampleRateRatio = outputFormat.mSampleRate / device->Frequency;

    // The output format should be the requested format, but using the hardware sample rate
    // This is because the AudioUnit will automatically scale other properties, except for sample rate
    err = AudioUnitSetProperty(data->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, (void *)&outputFormat, sizeof(outputFormat));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        goto error;
    }

    // Set the AudioUnit output format frame count
    outputFrameCount = device->UpdateSize * data->sampleRateRatio;
    err = AudioUnitSetProperty(data->audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Output, 0, &outputFrameCount, sizeof(outputFrameCount));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed: %d\n", err);
        goto error;
    }

    // Set up sample converter
    err = AudioConverterNew(&outputFormat, &requestedFormat, &data->audioConverter);
    if(err != noErr)
    {
        ERR("AudioConverterNew failed: %d\n", err);
        goto error;
    }

    // Create a buffer for use in the resample callback
    data->resampleBuffer = malloc(device->UpdateSize * data->frameSize * data->sampleRateRatio);

    // Allocate buffer for the AudioUnit output
    data->bufferList = allocate_buffer_list(outputFormat.mChannelsPerFrame, device->UpdateSize * data->frameSize * data->sampleRateRatio);
    if(data->bufferList == NULL)
        goto error;

    data->ring = CreateRingBuffer(data->frameSize, (device->UpdateSize * data->sampleRateRatio) * device->NumUpdates);
    if(data->ring == NULL)
        goto error;

    al_string_copy_cstr(&device->DeviceName, deviceName);

    return ALC_NO_ERROR;

error:
    DestroyRingBuffer(data->ring);
    free(data->resampleBuffer);
    destroy_buffer_list(data->bufferList);

    if(data->audioConverter)
        AudioConverterDispose(data->audioConverter);
    if(data->audioUnit)
        CloseComponent(data->audioUnit);

    free(data);
    device->ExtraData = NULL;

    return ALC_INVALID_VALUE;
}
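
allocate_buffer_list() is referenced but not defined here. For interleaved capture data a plausible single-buffer version allocates the header and the payload in one block, which is also what a matching destroy_buffer_list() would free (a sketch, not OpenAL Soft's actual helper):

#include <stdlib.h>

static AudioBufferList *allocate_buffer_list(UInt32 channelCount, UInt32 byteSize)
{
    // One allocation: AudioBufferList header (with its one inline AudioBuffer)
    // followed immediately by the sample payload.
    AudioBufferList *list = calloc(1, sizeof(AudioBufferList) + byteSize);
    if (!list) return NULL;
    list->mNumberBuffers = 1;
    list->mBuffers[0].mNumberChannels = channelCount;
    list->mBuffers[0].mDataByteSize = byteSize;
    list->mBuffers[0].mData = list + 1;    // payload starts after the header
    return list;
}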
Пример #29
0
/*
 * Open codec.
 */
static pj_status_t ilbc_codec_open(pjmedia_codec *codec, 
				   pjmedia_codec_param *attr )
{
    struct ilbc_codec *ilbc_codec = (struct ilbc_codec*)codec;
    pj_status_t status;
    unsigned i;
    pj_uint16_t dec_fmtp_mode = DEFAULT_MODE, 
		enc_fmtp_mode = DEFAULT_MODE;

#if defined(PJMEDIA_ILBC_CODEC_USE_COREAUDIO)&& PJMEDIA_ILBC_CODEC_USE_COREAUDIO
    AudioStreamBasicDescription srcFormat, dstFormat;
    UInt32 size;

    srcFormat.mSampleRate       = attr->info.clock_rate;
    srcFormat.mFormatID         = kAudioFormatLinearPCM;
    srcFormat.mFormatFlags      = kLinearPCMFormatFlagIsSignedInteger
				  | kLinearPCMFormatFlagIsPacked;
    srcFormat.mBitsPerChannel   = attr->info.pcm_bits_per_sample;
    srcFormat.mChannelsPerFrame = attr->info.channel_cnt;
    srcFormat.mBytesPerFrame    = srcFormat.mChannelsPerFrame
	                          * srcFormat.mBitsPerChannel >> 3;
    srcFormat.mFramesPerPacket  = 1;
    srcFormat.mBytesPerPacket   = srcFormat.mBytesPerFrame *
				  srcFormat.mFramesPerPacket;

    memset(&dstFormat, 0, sizeof(dstFormat));
    dstFormat.mSampleRate 	= attr->info.clock_rate;
    dstFormat.mFormatID 	= kAudioFormatiLBC;
    dstFormat.mChannelsPerFrame = attr->info.channel_cnt;
#endif

    pj_assert(ilbc_codec != NULL);
    pj_assert(ilbc_codec->enc_ready == PJ_FALSE && 
	      ilbc_codec->dec_ready == PJ_FALSE);

    /* Get decoder mode */
    for (i = 0; i < attr->setting.dec_fmtp.cnt; ++i) {
	if (pj_stricmp(&attr->setting.dec_fmtp.param[i].name, &STR_MODE) == 0)
	{
	    dec_fmtp_mode = (pj_uint16_t)
			    pj_strtoul(&attr->setting.dec_fmtp.param[i].val);
	    break;
	}
    }

    /* Decoder mode must be set */
    PJ_ASSERT_RETURN(dec_fmtp_mode == 20 || dec_fmtp_mode == 30, 
		     PJMEDIA_CODEC_EINMODE);

    /* Get encoder mode */
    for (i = 0; i < attr->setting.enc_fmtp.cnt; ++i) {
	if (pj_stricmp(&attr->setting.enc_fmtp.param[i].name, &STR_MODE) == 0)
	{
	    enc_fmtp_mode = (pj_uint16_t)
			    pj_strtoul(&attr->setting.enc_fmtp.param[i].val);
	    break;
	}
    }

    PJ_ASSERT_RETURN(enc_fmtp_mode==20 || enc_fmtp_mode==30, 
		     PJMEDIA_CODEC_EINMODE);

    /* Both sides of a bi-directional session MUST use the same "mode" value.
     * At this point the only possible values are 20 or 30, so when the
     * encoder and decoder modes differ, just use the default mode, which is 30.
     */
    if (enc_fmtp_mode != dec_fmtp_mode) {
	enc_fmtp_mode = dec_fmtp_mode = DEFAULT_MODE;
	PJ_LOG(4,(ilbc_codec->obj_name, 
		  "Normalized iLBC encoder and decoder modes to %d", 
		  DEFAULT_MODE));
    }

    /* Update some attributes based on negotiated mode. */
    attr->info.avg_bps = (dec_fmtp_mode == 30? 13333 : 15200);
    attr->info.frm_ptime = dec_fmtp_mode;

    /* Create encoder */
#if defined(PJMEDIA_ILBC_CODEC_USE_COREAUDIO)&& PJMEDIA_ILBC_CODEC_USE_COREAUDIO
    dstFormat.mFramesPerPacket  = CLOCK_RATE * enc_fmtp_mode / 1000;
    dstFormat.mBytesPerPacket   = (enc_fmtp_mode == 20? 38 : 50);

    /* Use AudioFormat API to fill out the rest of the description */
    size = sizeof(dstFormat);
    AudioFormatGetProperty(kAudioFormatProperty_FormatInfo,
 	                   0, NULL, &size, &dstFormat);

    if (AudioConverterNew(&srcFormat, &dstFormat, &ilbc_codec->enc) != noErr)
	return PJMEDIA_CODEC_EFAILED;
    ilbc_codec->enc_frame_size = (enc_fmtp_mode == 20? 38 : 50);
#else
    ilbc_codec->enc_frame_size = initEncode(&ilbc_codec->enc, enc_fmtp_mode);
#endif
    ilbc_codec->enc_samples_per_frame = CLOCK_RATE * enc_fmtp_mode / 1000;
    ilbc_codec->enc_ready = PJ_TRUE;

    /* Create decoder */
#if defined(PJMEDIA_ILBC_CODEC_USE_COREAUDIO)&& PJMEDIA_ILBC_CODEC_USE_COREAUDIO
    if (AudioConverterNew(&dstFormat, &srcFormat, &ilbc_codec->dec) != noErr)
	return PJMEDIA_CODEC_EFAILED;
    ilbc_codec->dec_samples_per_frame = CLOCK_RATE * dec_fmtp_mode / 1000;
#else
    ilbc_codec->dec_samples_per_frame = initDecode(&ilbc_codec->dec,
						   dec_fmtp_mode,
						   attr->setting.penh);
#endif
    ilbc_codec->dec_frame_size = (dec_fmtp_mode == 20? 38 : 50);
    ilbc_codec->dec_ready = PJ_TRUE;

    /* Save plc flags */
    ilbc_codec->plc_enabled = (attr->setting.plc != 0);

    /* Create silence detector. */
    ilbc_codec->vad_enabled = (attr->setting.vad != 0);
    status = pjmedia_silence_det_create(ilbc_codec->pool, CLOCK_RATE,
					ilbc_codec->enc_samples_per_frame,
					&ilbc_codec->vad);
    if (status != PJ_SUCCESS)
	return status;

    /* Init last_tx (not necessary because of zalloc, but better to be
     * safe in case someone removes the zalloc later).
     */
    pj_set_timestamp32(&ilbc_codec->last_tx, 0, 0);

    PJ_LOG(4,(ilbc_codec->obj_name, 
	      "iLBC codec opened, mode=%d", dec_fmtp_mode));

    return PJ_SUCCESS;
}
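
The avg_bps values above fall straight out of iLBC's fixed frame sizes: 38 bytes per 20 ms frame, or 50 bytes per 30 ms frame. A one-liner that reproduces them:

static unsigned ilbc_avg_bps(unsigned mode_ms)   /* 20 or 30 */
{
    unsigned frame_bytes = (mode_ms == 20) ? 38 : 50;
    return frame_bytes * 8 * 1000 / mode_ms;     /* 15200 or 13333 bps */
}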
Пример #30
0
static av_cold int ffat_init_encoder(AVCodecContext *avctx)
{
    ATDecodeContext *at = avctx->priv_data;
    OSStatus status;

    AudioStreamBasicDescription in_format = {
        .mSampleRate = avctx->sample_rate,
        .mFormatID = kAudioFormatLinearPCM,
        .mFormatFlags = ((avctx->sample_fmt == AV_SAMPLE_FMT_FLT ||
                          avctx->sample_fmt == AV_SAMPLE_FMT_DBL) ? kAudioFormatFlagIsFloat
                        : avctx->sample_fmt == AV_SAMPLE_FMT_U8 ? 0
                        : kAudioFormatFlagIsSignedInteger)
                        | kAudioFormatFlagIsPacked,
        .mBytesPerPacket = av_get_bytes_per_sample(avctx->sample_fmt) * avctx->channels,
        .mFramesPerPacket = 1,
        .mBytesPerFrame = av_get_bytes_per_sample(avctx->sample_fmt) * avctx->channels,
        .mChannelsPerFrame = avctx->channels,
        .mBitsPerChannel = av_get_bytes_per_sample(avctx->sample_fmt) * 8,
    };
    AudioStreamBasicDescription out_format = {
        .mSampleRate = avctx->sample_rate,
        .mFormatID = ffat_get_format_id(avctx->codec_id, avctx->profile),
        .mChannelsPerFrame = in_format.mChannelsPerFrame,
    };
    UInt32 layout_size = sizeof(AudioChannelLayout) +
                         sizeof(AudioChannelDescription) * avctx->channels;
    AudioChannelLayout *channel_layout = av_malloc(layout_size);

    if (!channel_layout)
        return AVERROR(ENOMEM);

    if (avctx->codec_id == AV_CODEC_ID_ILBC) {
        int mode = get_ilbc_mode(avctx);
        out_format.mFramesPerPacket  = 8000 * mode / 1000;
        out_format.mBytesPerPacket   = (mode == 20 ? 38 : 50);
    }

    status = AudioConverterNew(&in_format, &out_format, &at->converter);

    if (status != 0) {
        av_log(avctx, AV_LOG_ERROR, "AudioToolbox init error: %i\n", (int)status);
        av_free(channel_layout);
        return AVERROR_UNKNOWN;
    }

    if (!avctx->channel_layout)
        avctx->channel_layout = av_get_default_channel_layout(avctx->channels);

    if ((status = remap_layout(channel_layout, avctx->channel_layout, avctx->channels)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid channel layout\n");
        av_free(channel_layout);
        return status;
    }

    if (AudioConverterSetProperty(at->converter, kAudioConverterInputChannelLayout,
                                  layout_size, channel_layout)) {
        av_log(avctx, AV_LOG_ERROR, "Unsupported input channel layout\n");
        av_free(channel_layout);
        return AVERROR(EINVAL);
    }
    if (avctx->codec_id == AV_CODEC_ID_AAC) {
        int tag = get_aac_tag(avctx->channel_layout);
        if (tag) {
            channel_layout->mChannelLayoutTag = tag;
            channel_layout->mNumberChannelDescriptions = 0;
        }
    }
    if (AudioConverterSetProperty(at->converter, kAudioConverterOutputChannelLayout,
                                  layout_size, channel_layout)) {
        av_log(avctx, AV_LOG_ERROR, "Unsupported output channel layout\n");
        av_free(channel_layout);
        return AVERROR(EINVAL);
    }
    av_free(channel_layout);

    if (avctx->bits_per_raw_sample)
        AudioConverterSetProperty(at->converter,
                                  kAudioConverterPropertyBitDepthHint,
                                  sizeof(avctx->bits_per_raw_sample),
                                  &avctx->bits_per_raw_sample);

#if !TARGET_OS_IPHONE
    if (at->mode == -1)
        at->mode = (avctx->flags & AV_CODEC_FLAG_QSCALE) ?
                   kAudioCodecBitRateControlMode_Variable :
                   kAudioCodecBitRateControlMode_Constant;

    AudioConverterSetProperty(at->converter, kAudioCodecPropertyBitRateControlMode,
                              sizeof(at->mode), &at->mode);

    if (at->mode == kAudioCodecBitRateControlMode_Variable) {
        int q = avctx->global_quality / FF_QP2LAMBDA;
        if (q < 0 || q > 14) {
            av_log(avctx, AV_LOG_WARNING,
                   "VBR quality %d out of range, should be 0-14\n", q);
            q = av_clip(q, 0, 14);
        }
        q = 127 - q * 9;
        AudioConverterSetProperty(at->converter, kAudioCodecPropertySoundQualityForVBR,
                                  sizeof(q), &q);
    } else
#endif
    if (avctx->bit_rate > 0) {
        UInt32 rate = avctx->bit_rate;
        UInt32 size;
        status = AudioConverterGetPropertyInfo(at->converter,
                                               kAudioConverterApplicableEncodeBitRates,
                                               &size, NULL);
        if (!status && size) {
            UInt32 new_rate = rate;
            int count;
            int i;
            AudioValueRange *ranges = av_malloc(size);
            if (!ranges)
                return AVERROR(ENOMEM);
            AudioConverterGetProperty(at->converter,
                                      kAudioConverterApplicableEncodeBitRates,
                                      &size, ranges);
            count = size / sizeof(AudioValueRange);
            for (i = 0; i < count; i++) {
                AudioValueRange *range = &ranges[i];
                if (rate >= range->mMinimum && rate <= range->mMaximum) {
                    new_rate = rate;
                    break;
                } else if (rate > range->mMaximum) {
                    new_rate = range->mMaximum;
                } else {
                    new_rate = range->mMinimum;
                    break;
                }
            }
            if (new_rate != rate) {
                av_log(avctx, AV_LOG_WARNING,
                       "Bitrate %u not allowed; changing to %u\n", rate, new_rate);
                rate = new_rate;
            }
            av_free(ranges);
        }
        AudioConverterSetProperty(at->converter, kAudioConverterEncodeBitRate,
                                  sizeof(rate), &rate);
    }

    at->quality = 96 - at->quality * 32;
    AudioConverterSetProperty(at->converter, kAudioConverterCodecQuality,
                              sizeof(at->quality), &at->quality);

    if (!AudioConverterGetPropertyInfo(at->converter, kAudioConverterCompressionMagicCookie,
                                       &avctx->extradata_size, NULL) &&
        avctx->extradata_size) {
        UInt32 extradata_size = avctx->extradata_size; // AudioConverterGetProperty() takes a UInt32 size
        uint8_t *extradata;
        if (!(avctx->extradata = av_mallocz(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE)))
            return AVERROR(ENOMEM);
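        // FFmpeg expects ALAC extradata wrapped in a 36-byte (0x24) 'alac'
        // atom: 32-bit size, 'alac' tag, 4 zero bytes, then the raw cookie
        // at offset 12.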
        if (avctx->codec_id == AV_CODEC_ID_ALAC) {
            avctx->extradata_size = 0x24;
            AV_WB32(avctx->extradata,     0x24);
            AV_WB32(avctx->extradata + 4, MKBETAG('a','l','a','c'));
            extradata = avctx->extradata + 12;
        } else {
            extradata = avctx->extradata;
        }
        status = AudioConverterGetProperty(at->converter,
                                           kAudioConverterCompressionMagicCookie,
                                           &extradata_size, extradata);
        if (status != 0) {
            av_log(avctx, AV_LOG_ERROR, "AudioToolbox cookie error: %i\n", (int)status);
            return AVERROR_UNKNOWN;
        } else if (avctx->codec_id == AV_CODEC_ID_AAC) {
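            // The AAC cookie is a complete MPEG-4 ES descriptor; strip it
            // down to the DecoderSpecificInfo (AudioSpecificConfig), which is
            // what FFmpeg stores as AAC extradata.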
            GetByteContext gb;
            int tag, len;
            bytestream2_init(&gb, extradata, extradata_size);
            do {
                len = read_descr(&gb, &tag);
                if (tag == MP4DecConfigDescrTag) {
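                    // Skip objectTypeIndication, streamType/flags,
                    // bufferSizeDB, maxBitrate and avgBitrate (13 bytes).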
                    bytestream2_skip(&gb, 13);
                    len = read_descr(&gb, &tag);
                    if (tag == MP4DecSpecificDescrTag) {
                        len = FFMIN(gb.buffer_end - gb.buffer, len);
                        memmove(extradata, gb.buffer, len);
                        avctx->extradata_size = len;
                        break;
                    }
                } else if (tag == MP4ESDescrTag) {
                    int flags;
                    bytestream2_skip(&gb, 2);
                    flags = bytestream2_get_byte(&gb);
                    if (flags & 0x80) //streamDependenceFlag
                        bytestream2_skip(&gb, 2);
                    if (flags & 0x40) //URL_Flag
                        bytestream2_skip(&gb, bytestream2_get_byte(&gb));
                    if (flags & 0x20) //OCRstreamFlag
                        bytestream2_skip(&gb, 2);
                }
            } while (bytestream2_get_bytes_left(&gb));
        } else if (avctx->codec_id != AV_CODEC_ID_ALAC) {
            avctx->extradata_size = extradata_size;
        }
    }

    ffat_update_ctx(avctx);

#if !TARGET_OS_IPHONE && defined(__MAC_10_9)
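    // In VBR mode, translate the requested maximum rate into a per-packet
    // size limit for the codec.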
    if (at->mode == kAudioCodecBitRateControlMode_Variable && avctx->rc_max_rate) {
        UInt32 max_size = avctx->rc_max_rate * avctx->frame_size / avctx->sample_rate;
        if (max_size)
            AudioConverterSetProperty(at->converter, kAudioCodecPropertyPacketSizeLimitForVBR,
                                      sizeof(max_size), &max_size);
    }
#endif

    ff_af_queue_init(avctx, &at->afq);

    return 0;
}

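// Input callback for AudioConverterFillComplexBuffer(): supplies the encoder
// with one buffered input frame per invocation.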
static OSStatus ffat_encode_callback(AudioConverterRef converter, UInt32 *nb_packets,
                                     AudioBufferList *data,
                                     AudioStreamPacketDescription **packets,
                                     void *inctx)
{
    AVCodecContext *avctx = inctx;
    ATDecodeContext *at = avctx->priv_data;
    AVFrame *frame;

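    // No buffered input: at EOF report zero packets so the converter drains;
    // otherwise return the private status 1, which the caller treats as
    // "no data available yet" rather than a hard error.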
    if (!at->frame_queue.available) {
        if (at->eof) {
            *nb_packets = 0;
            return 0;
        } else {
            *nb_packets = 0;
            return 1;
        }
    }

    frame = ff_bufqueue_get(&at->frame_queue);

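    // Hand the converter the frame's interleaved samples, and report no more
    // input packets (samples) than the frame actually holds.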
    data->mNumberBuffers              = 1;
    data->mBuffers[0].mNumberChannels = avctx->channels;
    data->mBuffers[0].mDataByteSize   = frame->nb_samples *
                                        av_get_bytes_per_sample(avctx->sample_fmt) *
                                        avctx->channels;
    data->mBuffers[0].mData           = frame->data[0];
    if (*nb_packets > frame->nb_samples)
        *nb_packets = frame->nb_samples;

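    // The converter reads from the frame's buffer after this callback
    // returns, so park the frame in used_frame_queue instead of freeing it.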
    ff_bufqueue_add(avctx, &at->used_frame_queue, frame);

    return 0;
}

static int ffat_encode(AVCodecContext *avctx, AVPacket *avpkt,
                       const AVFrame *frame, int *got_packet_ptr)
{
    ATDecodeContext *at = avctx->priv_data;
    OSStatus ret;

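    // Single output buffer for one encoded packet; at->pkt_size holds the
    // maximum encoded packet size computed during init.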
    AudioBufferList out_buffers = {
        .mNumberBuffers = 1,
        .mBuffers = {
            {
                .mNumberChannels = avctx->channels,
                .mDataByteSize = at->pkt_size,
            }
        }
    };