Example #1
OSStatus writeRenderProc(void *inRefCon, 
						 AudioUnitRenderActionFlags *inActionFlags,
						 const AudioTimeStamp *inTimeStamp, 
						 UInt32 inBusNumber,
						 UInt32 inNumFrames, 
						 AudioBufferList *ioData)
{
    OSStatus err = noErr;
    CAData *d = (CAData *)inRefCon;
	if (gain_changed_out == true)
	{
		err = AudioUnitSetParameter(d->caOutAudioUnit, kAudioUnitParameterUnit_LinearGain,
									   kAudioUnitScope_Global, 0, (Float32)gain_volume_out, 0);
		if(err != noErr)
		{
			ms_error("failed to set output volume %i", err);
		}
	    gain_changed_out = false;
		err = noErr;
	}
	
	
	if(d->write_started != FALSE) {
		AudioStreamPacketDescription* outPacketDescription = NULL;
		err = AudioConverterFillComplexBuffer(d->caOutConverter, writeACInputProc, inRefCon,
											  &inNumFrames, ioData, outPacketDescription);
		if(err != noErr)
			ms_error("writeRenderProc:AudioConverterFillComplexBuffer err %08x %d", err, ioData->mNumberBuffers);
	}
    return err;
}
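The input proc (writeACInputProc above) is not part of this example. Every AudioConverterFillComplexBuffer call pulls its source data through an AudioConverterComplexInputDataProc; below is a minimal hedged sketch for interleaved PCM (ExampleState and its fields are hypothetical, not the actual mediastreamer2 layout):

#include <AudioToolbox/AudioToolbox.h>

typedef struct {
    const UInt8 *readPtr;        // next unread source byte
    UInt32 bytesAvailable;       // bytes left in the source buffer
    UInt32 bytesPerFrame;        // size of one PCM frame
    UInt32 channels;
} ExampleState;

// The converter calls this whenever it needs more input. Point ioData at
// your samples and report how many packets (frames, for PCM) you provide.
static OSStatus exampleACInputProc(AudioConverterRef inConverter,
                                   UInt32 *ioNumberDataPackets,
                                   AudioBufferList *ioData,
                                   AudioStreamPacketDescription **outPacketDesc,
                                   void *inUserData)
{
    ExampleState *s = (ExampleState *)inUserData;
    UInt32 availFrames = s->bytesAvailable / s->bytesPerFrame;
    if (availFrames == 0) {
        *ioNumberDataPackets = 0;   // nothing left: converter returns what it has
        return noErr;
    }
    if (*ioNumberDataPackets > availFrames)
        *ioNumberDataPackets = availFrames;
    ioData->mNumberBuffers = 1;
    ioData->mBuffers[0].mNumberChannels = s->channels;
    ioData->mBuffers[0].mData = (void *)s->readPtr;
    ioData->mBuffers[0].mDataByteSize = *ioNumberDataPackets * s->bytesPerFrame;
    s->readPtr += ioData->mBuffers[0].mDataByteSize;
    s->bytesAvailable -= ioData->mBuffers[0].mDataByteSize;
    if (outPacketDesc)
        *outPacketDesc = NULL;      // PCM needs no packet descriptions
    return noErr;
}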
Example #2
static ALCenum ca_capture_samples(ALCdevice *device, ALCvoid *buffer, ALCuint samples)
{
    ca_data *data = (ca_data*)device->ExtraData;
    AudioBufferList *list;
    UInt32 frameCount;
    OSStatus err;

    // If no samples are requested, just return
    if(samples == 0)
        return ALC_NO_ERROR;

    // Allocate a temporary AudioBufferList to hold the resampled data
    list = alloca(sizeof(AudioBufferList) + sizeof(AudioBuffer));

    // Point the resampling buffer to the capture buffer
    list->mNumberBuffers = 1;
    list->mBuffers[0].mNumberChannels = data->format.mChannelsPerFrame;
    list->mBuffers[0].mDataByteSize = samples * data->frameSize;
    list->mBuffers[0].mData = buffer;

    // Resample into another AudioBufferList
    frameCount = samples;
    err = AudioConverterFillComplexBuffer(data->audioConverter, ca_capture_conversion_callback,
                                          device, &frameCount, list, NULL);
    if(err != noErr)
    {
        ERR("AudioConverterFillComplexBuffer error: %d\n", err);
        return ALC_INVALID_VALUE;
    }
    return ALC_NO_ERROR;
}
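The alloca call above reserves sizeof(AudioBufferList) + sizeof(AudioBuffer) even though only one buffer is used: AudioBufferList declares mBuffers as a one-element array, and over-allocating trailing space is the conventional way to make room for more entries. A hedged heap-based helper for the general N-buffer case (the helper name is ours; nBuffers must be at least 1):

#include <stdlib.h>
#include <AudioToolbox/AudioToolbox.h>

// Allocate an AudioBufferList with room for nBuffers entries. The struct
// is declared with mBuffers[1], so add space for the remaining entries.
static AudioBufferList *allocateBufferList(UInt32 nBuffers)
{
    AudioBufferList *abl = (AudioBufferList *)calloc(1,
        sizeof(AudioBufferList) + (nBuffers - 1) * sizeof(AudioBuffer));
    if (abl != NULL)
        abl->mNumberBuffers = nBuffers;
    return abl;
}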
Example #3
uint32_t CoreAudioEncoder::encodeChunk(UInt32 npackets)
{
    prepareOutputBuffer(npackets);
    AudioBufferList *abl = m_output_abl.get();
    AudioStreamPacketDescription *aspd = &m_packet_desc[0];

    CHECKCA(AudioConverterFillComplexBuffer(m_converter, staticInputDataProc,
                                            this, &npackets, abl, aspd));

    if (samplesRead() == 0)
        return 0;

    if (npackets == 0 && abl->mBuffers[0].mDataByteSize == 0)
        return 0;

    if (!m_requires_packet_desc) {
        writeSamples(abl->mBuffers[0].mData,
                     abl->mBuffers[0].mDataByteSize, npackets);
    } else {
        for (uint32_t i = 0; i < npackets; ++i) {
            if (aspd[i].mVariableFramesInPacket) m_variable_packet_len = true;
            uint32_t nsamples =
                m_variable_packet_len ? aspd[i].mVariableFramesInPacket
                                      : m_output_desc.mFramesPerPacket;
            if (nsamples) {
                uint8_t *p = static_cast<uint8_t*>(abl->mBuffers[0].mData);
                writeSamples(p + aspd[i].mStartOffset,
                             aspd[i].mDataByteSize, nsamples);
            }
        }
    }
    return npackets;
}
Example #4
static OSStatus gviHardwarePlaybackIOProc(AudioDeviceID inDevice,
                                          const AudioTimeStamp * inNow,
                                          const AudioBufferList * inInputData, 
                                          const AudioTimeStamp * inInputTime,
                                          AudioBufferList * outOutputData, 
                                          const AudioTimeStamp * inOutputTime,
                                          void * inClientData)
{
	GVIDevice * device = (GVIDevice *)inClientData;
	GVIHardwareData * data = (GVIHardwareData *)device->m_data;
	AudioBufferList bufferList;
	OSStatus result;
	UInt32 size;

	// get a lock on the device
	if(!gviLockDevice(data))
		return (OSStatus)1;

	// calculate the number of samples the proc wants
	size = (outOutputData->mBuffers[0].mDataByteSize / data->m_playbackStreamDescriptor.mBytesPerFrame);

	// setup our own buffer list, pointing at the channel (buffer) we want
	bufferList.mNumberBuffers = 1;
	bufferList.mBuffers[0] = outOutputData->mBuffers[data->m_playbackChannel];

	// fill the buffer using the callback
	result = AudioConverterFillComplexBuffer(data->m_playbackConverter, gviAudioConverterPlaybackProc, device, &size, &bufferList, NULL);
	if(result != noErr)
	{
		// release the device lock before returning so it isn't left held
		gviUnlockDevice(data);
		return (OSStatus)1;
	}

	// release the device lock
	gviUnlockDevice(data);

	return noErr;
}
Example #5
OSStatus
otMacAudioInputStream::InputReadyCb(void *userdata, AudioUnitRenderActionFlags *actionFlags,
                                    const AudioTimeStamp *timeStamp, UInt32 busNumber,
                                    UInt32 numberFrames, AudioBufferList *data)
{
  otMacAudioInputStream *_this = (otMacAudioInputStream*)userdata;
  OSStatus err;
  DEBUG_DUMP2("InputReadyCb numberFrames = %d, frameEnd = %d", numberFrames, _this->mFrameEnd);

  err = AudioUnitRender(_this->mAudioUnit, actionFlags, timeStamp, busNumber,
                        numberFrames, _this->mBuffer);
  if (err == noErr) {
    UInt32 bytes = _this->mBuffer->mBuffers[0].mDataByteSize;
    char *data = (char*) _this->mBuffer->mBuffers[0].mData;

    while (bytes > 0) {
      PRUint32 len = PR_MIN(_this->mInputFrameSize - _this->mFrameEnd, bytes);
      memcpy(_this->mFrame + _this->mFrameEnd, data, len);

      DEBUG_DUMP_N(("InputReadyCbInt bytes = %d frameEnd = %d, inputFrameSize = %d, frameSize = %d", bytes, _this->mFrameEnd, _this->mInputFrameSize, _this->mFrameSize));

      data += len;
      bytes -= len;
      _this->mFrameEnd += len;

      if (_this->mFrameEnd < _this->mInputFrameSize)
        break;

      UInt32 frameSize = _this->mFrameSize/2;

      err = AudioConverterFillComplexBuffer(_this->mConverter,
                                            &ConverterCb, _this, &frameSize,
                                            _this->mConvertBuffer, NULL);
      if (err != noErr) {
        printErrCode(err);
        return err;
      }

      DEBUG_DUMP("SendFrame");

      char *data2 = (char*)_this->mConvertBuffer->mBuffers[0].mData;

      _this->mFilter->InputData(data2, _this->mFrameSize);
      if (_this->mTarget)
        _this->mTarget->AcceptData(data2, _this->mFrameSize);

      //_this->mFrameEnd = 0;
    }
  } else {
    printErrCode(err);
  }

  return noErr;
}
Example #6
UInt32 SFB::Audio::Converter::ConvertAudio(AudioBufferList *bufferList, UInt32 frameCount)
{
	if(!IsOpen() || nullptr == bufferList || 0 == frameCount)
		return 0;

	OSStatus result = AudioConverterFillComplexBuffer(mConverter, myAudioConverterComplexInputDataProc, mConverterState.get(), &frameCount, bufferList, nullptr);
	if(noErr != result)
		return 0;

	return frameCount;
}
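Because the wrapper reports the frame count actually produced, a caller can drain the converter by looping until it returns 0. A hedged usage sketch (converter, bufferList, capacityBytes, and framesPerChunk are assumed to be set up elsewhere):

// Note: AudioConverterFillComplexBuffer shrinks mDataByteSize to the bytes
// actually written, so the capacity must be restored before each pass.
UInt32 produced;
do {
    bufferList->mBuffers[0].mDataByteSize = capacityBytes;
    produced = converter.ConvertAudio(bufferList, framesPerChunk);
    // ... consume 'produced' frames from bufferList ...
} while (produced > 0);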
Example #7
static OSStatus convertProc(void *inRefCon, AudioUnitRenderActionFlags *inActionFlags,
                            const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber,
                            UInt32 inNumFrames, AudioBufferList *ioData)
{
	AudioStreamPacketDescription* outPacketDescription = NULL;
	mpg123_coreaudio_t* ca = (mpg123_coreaudio_t*)inRefCon;
	OSStatus err = noErr;
	
	err = AudioConverterFillComplexBuffer(ca->converter, playProc, inRefCon, &inNumFrames, ioData, outPacketDescription);
	
	return err;
}
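convertProc's signature matches AURenderCallback exactly, so mpg123 presumably installs it as the output AudioUnit's render callback. A hedged sketch of the standard wiring (outputUnit and ca are assumed to exist from device setup):

#include <AudioToolbox/AudioToolbox.h>

// Attach convertProc so the output unit pulls converted audio on demand.
AURenderCallbackStruct renderCallback;
renderCallback.inputProc = convertProc;
renderCallback.inputProcRefCon = ca;
OSStatus err = AudioUnitSetProperty(outputUnit,
                                    kAudioUnitProperty_SetRenderCallback,
                                    kAudioUnitScope_Input, 0,
                                    &renderCallback, sizeof(renderCallback));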
Example #8
void CocoaCaConverter::loadData( BufferList *ioData )
{	
	std::shared_ptr<AudioBufferList> nativeBufferList = createCaBufferList( ioData );

	UInt32 aSampleCount = ioData->mBuffers[0].mSampleCount;
	AudioStreamPacketDescription * outputPacketDescriptions = new AudioStreamPacketDescription[aSampleCount];
	
	OSStatus err = AudioConverterFillComplexBuffer( mConverter, CocoaCaConverter::dataInputCallback, (void *)this, &aSampleCount, nativeBufferList.get(), outputPacketDescriptions );
	delete [] outputPacketDescriptions;
	if( err ) {
		//throw
	}
	
	fillBufferListFromCaBufferList( ioData, nativeBufferList.get(), (uint32_t)aSampleCount );
}
Example #9
// _______________________________________________________________________________________
//
void	CAAudioFile::WritePacketsFromCallback(
								AudioConverterComplexInputDataProc	inInputDataProc,
								void *								inInputDataProcUserData)
{
	while (true) {
		// keep writing until we exhaust the input (temporary stop), or produce no output (EOF)
		UInt32 numEncodedPackets = mIOBufferSizePackets;
		mIOBufferList.mBuffers[0].mDataByteSize = mIOBufferSizeBytes;
#if CAAUDIOFILE_PROFILE
		mInConverter = true;
#endif
		StartTiming(this, fill);
		OSStatus err = AudioConverterFillComplexBuffer(mConverter, inInputDataProc, inInputDataProcUserData,
					&numEncodedPackets, &mIOBufferList, mPacketDescs);
		ElapsedTime(this, fill, mTicksInConverter);
#if CAAUDIOFILE_PROFILE
		mInConverter = false;
#endif
		XThrowIf(err != 0 && err != kNoMoreInputRightNow, err, "convert audio packets (write)");
		if (numEncodedPackets == 0)
			break;
		Byte *buf = (Byte *)mIOBufferList.mBuffers[0].mData;
#if VERBOSE_IO
		printf("CAAudioFile::WritePacketsFromCallback: wrote %ld packets, %ld bytes\n", numEncodedPackets, mIOBufferList.mBuffers[0].mDataByteSize);
		if (mPacketDescs) {
			for (UInt32 i = 0; i < numEncodedPackets; ++i) {
				printf("  write packet %qd : offset %qd, length %ld\n", mPacketMark + i, mPacketDescs[i].mStartOffset, mPacketDescs[i].mDataByteSize);
#if VERBOSE_IO >= 2
				hexdump(buf + mPacketDescs[i].mStartOffset, mPacketDescs[i].mDataByteSize);
#endif
			}
		}
#endif
		StartTiming(this, write);
		XThrowIfError(AudioFileWritePackets(mAudioFile, mUseCache, mIOBufferList.mBuffers[0].mDataByteSize, mPacketDescs, mPacketMark, &numEncodedPackets, buf), "write audio file");
		ElapsedTime(this, write, mTicksInIO);
		mPacketMark += numEncodedPackets;
		//mNumberPackets += numEncodedPackets;
		if (mFileDataFormat.mFramesPerPacket > 0)
			mFrameMark += numEncodedPackets * mFileDataFormat.mFramesPerPacket;
		else {
			for (UInt32 i = 0; i < numEncodedPackets; ++i)
				mFrameMark += mPacketDescs[i].mVariableFramesInPacket;
		}
		if (err == kNoMoreInputRightNow)
			break;
	}
}
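kNoMoreInputRightNow above is a private status code: the input proc sets *ioNumberDataPackets to 0 and returns it to pause conversion without signalling EOF, and AudioConverterFillComplexBuffer passes the code back to the loop together with any packets already produced. A hedged sketch of that convention (MySource is hypothetical; the four-char value is assumed):

enum { kNoMoreInputRightNow = 'nmir' };  // private code, passed through by the converter

static OSStatus pausableInputProc(AudioConverterRef converter,
                                  UInt32 *ioNumberDataPackets,
                                  AudioBufferList *ioData,
                                  AudioStreamPacketDescription **outDesc,
                                  void *inUserData)
{
    MySource *src = (MySource *)inUserData;
    if (!src->hasBufferedInput()) {
        *ioNumberDataPackets = 0;
        return kNoMoreInputRightNow;     // temporary stall, not EOF
    }
    // ... otherwise hand out the next buffered packets as usual ...
    return noErr;
}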
Example #10
 void
 AACEncode::pushBuffer(const uint8_t* const data, size_t size, IMetadata& metadata)
 {
     const size_t sampleCount = size / m_bytesPerSample;
     const size_t aac_packet_count = sampleCount / kSamplesPerFrame;
     const size_t required_bytes = aac_packet_count * m_outputPacketMaxSize;
     
     if(m_outputBuffer.total() < (required_bytes)) {
         m_outputBuffer.resize(required_bytes);
     }
     uint8_t* p = m_outputBuffer();
     uint8_t* p_out = (uint8_t*)data;
     
     for ( size_t i = 0 ; i < aac_packet_count ; ++i ) {
         UInt32 num_packets = 1;
         
         AudioBufferList l;
         l.mNumberBuffers=1;
         l.mBuffers[0].mDataByteSize = m_outputPacketMaxSize * num_packets;
         l.mBuffers[0].mData = p;
         
         std::unique_ptr<UserData> ud(new UserData());
         ud->size = static_cast<int>(kSamplesPerFrame * m_bytesPerSample);
         ud->data = const_cast<uint8_t*>(p_out);
         ud->packetSize = static_cast<int>(m_bytesPerSample);
         
         AudioStreamPacketDescription output_packet_desc[num_packets];
         m_converterMutex.lock();
         AudioConverterFillComplexBuffer(m_audioConverter, AACEncode::ioProc, ud.get(), &num_packets, &l, output_packet_desc);
         m_converterMutex.unlock();
         
         p += output_packet_desc[0].mDataByteSize;
         p_out += kSamplesPerFrame * m_bytesPerSample;
     }
     const size_t totalBytes = p - m_outputBuffer();
     
     
     auto output = m_output.lock();
     if(output && totalBytes) {
         if(!m_sentConfig) {
             output->pushBuffer((const uint8_t*)m_asc, sizeof(m_asc), metadata);
             m_sentConfig = true;
         }
         
         output->pushBuffer(m_outputBuffer(), totalBytes, metadata);
     }
 }
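m_asc above holds the AudioSpecificConfig that is pushed once before any AAC payload. One common way to obtain it is to read the encoder's magic cookie and parse the ASC out of it; a hedged sketch of just the property query (cookie parsing is format-specific and omitted):

#include <vector>

UInt32 cookieSize = 0;
OSStatus err = AudioConverterGetPropertyInfo(m_audioConverter,
                                             kAudioConverterCompressionMagicCookie,
                                             &cookieSize, NULL);
if (err == noErr && cookieSize > 0) {
    std::vector<UInt8> cookie(cookieSize);
    err = AudioConverterGetProperty(m_audioConverter,
                                    kAudioConverterCompressionMagicCookie,
                                    &cookieSize, cookie.data());
    // ... locate the AudioSpecificConfig inside the returned cookie ...
}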
Example #11
void AudioFile::read(Float32 *data, UInt64 *cursor, UInt32 *numFrames)
{
  AudioFramePacketTranslation t;
  UInt32 size = sizeof(AudioFramePacketTranslation);
  t.mFrame = *cursor;
  AudioFileGetProperty(mAudioFileID, kAudioFilePropertyFrameToPacket, &size, &t);
  *mCursor = t.mPacket;
  
  AudioFramePacketTranslation t2;
  t2.mFrame = *numFrames;
  AudioFileGetProperty(mAudioFileID, kAudioFilePropertyFrameToPacket, &size, &t2);
  UInt32 numPacketsToRead = t2.mPacket ? t2.mPacket : 1;
  
  AudioBytePacketTranslation t3;
  t3.mPacket = numPacketsToRead;
  size = sizeof(AudioBytePacketTranslation);
  AudioFileGetProperty(mAudioFileID, kAudioFilePropertyPacketToByte, &size, &t3);
  
  if (mConverterBuffer) free(mConverterBuffer);
  mConverterBuffer = (char*)malloc(t3.mByte);
  mNumPacketsToRead = numPacketsToRead;
  
  UInt32 outNumBytes;
  checkError(AudioFileReadPackets(mAudioFileID, false, &outNumBytes, mPacketDescs, *mCursor, &numPacketsToRead, mConverterBuffer), "AudioFileReadPackets");
  mConvertByteSize = outNumBytes;
  
  UInt32 numFramesToConvert = t.mFrameOffsetInPacket + *numFrames;
  bool interleaved = true;
  interleaved = !(mClientFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved);
  AudioBufferList* tmpbuf = AudioSourceNode::createAudioBufferList(2, interleaved, numFramesToConvert, sizeof(Float32));
  checkError(AudioConverterFillComplexBuffer(mAudioConverterRef, encoderProc, this, &numFramesToConvert, tmpbuf, NULL),
             "AudioConverterFillComplexBuffer");
  
  if (interleaved) {
    Float32* sample = (Float32*)tmpbuf->mBuffers[0].mData;
    memcpy(data, &sample[t.mFrameOffsetInPacket], numFramesToConvert * sizeof(Float32) * mClientFormat.mChannelsPerFrame);
  }
  
  AudioSourceNode::deleteAudioBufferList(tmpbuf);
  
  if (numFramesToConvert == 0) {
    AudioConverterReset(mAudioConverterRef);
  }
  
  *numFrames = numFramesToConvert;
}
Example #12
OSStatus MT32Synth::Render(AudioUnitRenderActionFlags &ioActionFlags, const AudioTimeStamp &inTimeStamp, UInt32 inNumberFrames) {

    if(!synth) {
        return noErr;
    }
    
    AUOutputElement* outputBus = GetOutput(0);
	outputBus->PrepareBuffer(inNumberFrames);
	
	AudioBufferList& outputBufList = outputBus->GetBufferList();
	AUBufferList::ZeroBuffer(outputBufList);

    UInt32 ioOutputDataPackets = inNumberFrames * destFormat.mFramesPerPacket;
    AudioConverterFillComplexBuffer(audioConverterRef, EncoderDataProc, (void*) this, &ioOutputDataPackets, &outputBufList, NULL);

    return noErr;
}
Example #13
void AudioFile::read(AudioBufferList* buf, UInt64* cursor, UInt32* numFrames)
{
  AudioFramePacketTranslation t;
  UInt32 size = sizeof(AudioFramePacketTranslation);
  t.mFrame = *cursor;
  AudioFileGetProperty(mAudioFileID, kAudioFilePropertyFrameToPacket, &size, &t);
  *mCursor = t.mPacket;
  
  UInt32 numFramesToRead = *numFrames;//t.mFrameOffsetInPacket + *numFrames;
  AudioBufferList* tmpbuf = AudioSourceNode::createAudioBufferList(2, false, numFramesToRead, sizeof(Float32));
  checkError(AudioConverterFillComplexBuffer(mAudioConverterRef, encoderProc, this, &numFramesToRead, tmpbuf, NULL),
             "AudioConverterFillComplexBuffer");

  memcpy(buf->mBuffers[0].mData, tmpbuf->mBuffers[0].mData, *numFrames * sizeof(Float32));
  memcpy(buf->mBuffers[1].mData, tmpbuf->mBuffers[1].mData, *numFrames * sizeof(Float32));

  AudioSourceNode::deleteAudioBufferList(tmpbuf);
  if (numFramesToRead == 0) {
    AudioConverterReset(mAudioConverterRef);
  }
  *numFrames = numFramesToRead;
}
Example #14
OSStatus AudioConverterConvertComplexBuffer(AudioConverterRef inAudioConverter, UInt32 inNumberPCMFrames, const AudioBufferList *inInputData, AudioBufferList *outOutputData)
{
	OSStatus status;
	UInt32 dataPacketSize;
	int64_t totalOutBytes = 0;

	AudioConverterComplexInputDataProc proc = [](AudioConverterRef audioConverter, UInt32* numberDataPackets, AudioBufferList* data, AudioStreamPacketDescription** dataPacketDescription, void* userData) -> OSStatus
	{
		// recover the caller's input list from the user-data pointer
		const AudioBufferList *inInputData = (const AudioBufferList *) userData;
		(void) inInputData; // input delivery is not implemented here; unimpErr is returned below

		if (dataPacketDescription)
			*dataPacketDescription = nullptr;

		return unimpErr;
	};

	for (UInt32 i = 0; i < outOutputData->mNumberBuffers; i++)
		totalOutBytes += outOutputData->mBuffers[i].mDataByteSize;

	dataPacketSize = totalOutBytes / inAudioConverter->frameSize();

	status = AudioConverterFillComplexBuffer(inAudioConverter, proc, (void*) inInputData, &dataPacketSize, outOutputData, nullptr);
	return status;
}
Example #15
    }

    hb_buffer_t *obuf;
    AudioStreamPacketDescription odesc = { 0 };
    AudioBufferList obuflist =
    {
        .mNumberBuffers = 1,
        .mBuffers = { { .mNumberChannels = pv->nchannels } },
    };

    obuf = hb_buffer_init(pv->omaxpacket);
    obuflist.mBuffers[0].mDataByteSize = obuf->size;
    obuflist.mBuffers[0].mData = obuf->data;

    OSStatus err = AudioConverterFillComplexBuffer(pv->converter,
                                                   inInputDataProc, pv,
                                                   &npackets, &obuflist, &odesc);

    if (err != noErr && err != 1)
    {
        hb_log("encCoreAudio: unexpected error in AudioConverterFillComplexBuffer()");
    }
    // only drop the output buffer if it's actually empty
    if (!npackets || odesc.mDataByteSize <= 0)
    {
        hb_log("encCoreAudio: 0 packets returned");
        return NULL;
    }

    obuf->size        = odesc.mDataByteSize;
    obuf->s.start     = 90000LL * pv->samples / pv->osamplerate;
Example #16
MediaResult
AppleATDecoder::DecodeSample(MediaRawData* aSample)
{
  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());

  // Array containing the queued decoded audio frames, about to be output.
  nsTArray<AudioDataValue> outputData;
  UInt32 channels = mOutputFormat.mChannelsPerFrame;
  // Pick a multiple of the frame size close to a power of two
  // for efficient allocation.
  const uint32_t MAX_AUDIO_FRAMES = 128;
  const uint32_t maxDecodedSamples = MAX_AUDIO_FRAMES * channels;

  // Descriptions for _decompressed_ audio packets. ignored.
  auto packets = MakeUnique<AudioStreamPacketDescription[]>(MAX_AUDIO_FRAMES);

  // This API insists on having packets spoon-fed to it from a callback.
  // This structure exists only to pass our state.
  PassthroughUserData userData =
    { channels, (UInt32)aSample->Size(), aSample->Data() };

  // Decompressed audio buffer
  AlignedAudioBuffer decoded(maxDecodedSamples);
  if (!decoded) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  do {
    AudioBufferList decBuffer;
    decBuffer.mNumberBuffers = 1;
    decBuffer.mBuffers[0].mNumberChannels = channels;
    decBuffer.mBuffers[0].mDataByteSize =
      maxDecodedSamples * sizeof(AudioDataValue);
    decBuffer.mBuffers[0].mData = decoded.get();

    // in: the max number of packets we can handle from the decoder.
    // out: the number of packets the decoder is actually returning.
    UInt32 numFrames = MAX_AUDIO_FRAMES;

    OSStatus rv = AudioConverterFillComplexBuffer(mConverter,
                                                  _PassthroughInputDataCallback,
                                                  &userData,
                                                  &numFrames /* in/out */,
                                                  &decBuffer,
                                                  packets.get());

    if (rv && rv != kNoMoreDataErr) {
      LOG("Error decoding audio sample: %d\n", rv);
      return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                         RESULT_DETAIL("Error decoding audio sample: %d @ %lld",
                                       rv, aSample->mTime));
    }

    if (numFrames) {
      outputData.AppendElements(decoded.get(), numFrames * channels);
    }

    if (rv == kNoMoreDataErr) {
      break;
    }
  } while (true);

  if (outputData.IsEmpty()) {
    return NS_OK;
  }

  size_t numFrames = outputData.Length() / channels;
  int rate = mOutputFormat.mSampleRate;
  media::TimeUnit duration = FramesToTimeUnit(numFrames, rate);
  if (!duration.IsValid()) {
    NS_WARNING("Invalid count of accumulated audio samples");
    return MediaResult(
      NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
      RESULT_DETAIL(
        "Invalid count of accumulated audio samples: num:%llu rate:%d",
        uint64_t(numFrames), rate));
  }

#ifdef LOG_SAMPLE_DECODE
  LOG("pushed audio at time %lfs; duration %lfs\n",
      (double)aSample->mTime / USECS_PER_S,
      duration.ToSeconds());
#endif

  AudioSampleBuffer data(outputData.Elements(), outputData.Length());
  if (!data.Data()) {
    return NS_ERROR_OUT_OF_MEMORY;
  }
  if (mChannelLayout && !mAudioConverter) {
    AudioConfig in(*mChannelLayout.get(), rate);
    AudioConfig out(channels, rate);
    if (!in.IsValid() || !out.IsValid()) {
      return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                         RESULT_DETAIL("Invalid audio config"));
    }
    mAudioConverter = MakeUnique<AudioConverter>(in, out);
  }
  if (mAudioConverter) {
    MOZ_ASSERT(mAudioConverter->CanWorkInPlace());
    data = mAudioConverter->Process(Move(data));
  }

  RefPtr<AudioData> audio = new AudioData(aSample->mOffset,
                                          aSample->mTime,
                                          duration.ToMicroseconds(),
                                          numFrames,
                                          data.Forget(),
                                          channels,
                                          rate);
  mCallback->Output(audio);
  return NS_OK;
}
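_PassthroughInputDataCallback is not shown with this example. It hands the queued compressed packet to the converter exactly once, then returns kNoMoreDataErr, a decoder-private status that terminates the drain loop above. A hedged sketch (the struct and constant values approximate the ones used above; mPacket is an added scratch field for the description the converter asks for):

struct PassthroughUserDataSketch {
  UInt32 mChannels;
  UInt32 mSize;
  const uint8_t* mData;
  AudioStreamPacketDescription mPacket;
};

static const OSStatus kNoMoreDataErr = 'MOAR';  // private sentinel (value assumed)

static OSStatus
_PassthroughInputDataCallback(AudioConverterRef aConverter,
                              UInt32* aNumDataPackets,
                              AudioBufferList* aData,
                              AudioStreamPacketDescription** aPacketDesc,
                              void* aUserData)
{
  PassthroughUserDataSketch* userData =
    static_cast<PassthroughUserDataSketch*>(aUserData);
  if (!userData->mSize) {
    *aNumDataPackets = 0;
    return kNoMoreDataErr;                      // sample already consumed
  }
  *aNumDataPackets = 1;                         // one compressed packet
  aData->mNumberBuffers = 1;
  aData->mBuffers[0].mNumberChannels = userData->mChannels;
  aData->mBuffers[0].mDataByteSize = userData->mSize;
  aData->mBuffers[0].mData = const_cast<uint8_t*>(userData->mData);
  if (aPacketDesc) {                            // compressed input needs a description
    userData->mPacket.mStartOffset = 0;
    userData->mPacket.mVariableFramesInPacket = 0;
    userData->mPacket.mDataByteSize = userData->mSize;
    *aPacketDesc = &userData->mPacket;
  }
  userData->mSize = 0;                          // mark as consumed
  return noErr;
}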
Example #17
void
AppleATDecoder::SampleCallback(uint32_t aNumBytes,
                               uint32_t aNumPackets,
                               const void* aData,
                               AudioStreamPacketDescription* aPackets)
{
    // Pick a multiple of the frame size close to a power of two
    // for efficient allocation.
    const uint32_t MAX_AUDIO_FRAMES = 128;
    const uint32_t decodedSize = MAX_AUDIO_FRAMES * mConfig.channel_count *
                                 sizeof(AudioDataValue);

    // Descriptions for _decompressed_ audio packets. ignored.
    nsAutoArrayPtr<AudioStreamPacketDescription>
    packets(new AudioStreamPacketDescription[MAX_AUDIO_FRAMES]);

    // This API insists on having packets spoon-fed to it from a callback.
    // This structure exists only to pass our state and the result of the
    // parser on to the callback above.
    PassthroughUserData userData =
    { this, aNumPackets, aNumBytes, aData, aPackets, false };

    do {
        // Decompressed audio buffer
        nsAutoArrayPtr<uint8_t> decoded(new uint8_t[decodedSize]);

        AudioBufferList decBuffer;
        decBuffer.mNumberBuffers = 1;
        decBuffer.mBuffers[0].mNumberChannels = mOutputFormat.mChannelsPerFrame;
        decBuffer.mBuffers[0].mDataByteSize = decodedSize;
        decBuffer.mBuffers[0].mData = decoded.get();

        // in: the max number of packets we can handle from the decoder.
        // out: the number of packets the decoder is actually returning.
        UInt32 numFrames = MAX_AUDIO_FRAMES;

        OSStatus rv = AudioConverterFillComplexBuffer(mConverter,
                      _PassthroughInputDataCallback,
                      &userData,
                      &numFrames /* in/out */,
                      &decBuffer,
                      packets.get());

        if (rv && rv != kNeedMoreData) {
            LOG("Error decoding audio stream: %#x\n", rv);
            mCallback->Error();
            break;
        }
        LOG("%d frames decoded", numFrames);

        // If we decoded zero frames then AudioConverterFillComplexBuffer is out
        // of data to provide.  We drained its internal buffer completely on the
        // last pass.
        if (numFrames == 0 && rv == kNeedMoreData) {
            LOG("FillComplexBuffer out of data exactly\n");
            mCallback->InputExhausted();
            break;
        }

        const int rate = mOutputFormat.mSampleRate;
        const int channels = mOutputFormat.mChannelsPerFrame;

        int64_t time = mCurrentAudioTimestamp;
        int64_t duration = FramesToUsecs(numFrames, rate).value();

        LOG("pushed audio at time %lfs; duration %lfs\n",
            (double)time / USECS_PER_S, (double)duration / USECS_PER_S);

        AudioData* audio = new AudioData(mSamplePosition,
                                         time, duration, numFrames,
                                         reinterpret_cast<AudioDataValue*>(decoded.forget()),
                                         channels, rate);
        mCallback->Output(audio);
        mHaveOutput = true;

        if (rv == kNeedMoreData) {
            // No error; we just need more data.
            LOG("FillComplexBuffer out of data\n");
            mCallback->InputExhausted();
            break;
        }
    } while (true);
}
Example #18
OSStatus readRenderProc(void *inRefCon, 
						AudioUnitRenderActionFlags *inActionFlags,
						const AudioTimeStamp *inTimeStamp, 
						UInt32 inBusNumber,
						UInt32 inNumFrames, 
						AudioBufferList *ioData)
{
	CAData *d=(CAData*)inRefCon;
	OSStatus	err = noErr;
	
	err = AudioUnitRender(d->caInAudioUnit, inActionFlags, inTimeStamp, inBusNumber,
						  inNumFrames, d->fAudioBuffer);
	if(err != noErr)
	{
		ms_error("AudioUnitRender %d size = %d", err, d->fAudioBuffer->mBuffers[0].mDataByteSize);
		return err;
	}
	
	UInt32 AvailableOutputBytes = inNumFrames * sizeof (float) * d->caInASBD.mChannelsPerFrame;
    UInt32 propertySize = sizeof (AvailableOutputBytes);
    err = AudioConverterGetProperty (d->caInConverter,
									 kAudioConverterPropertyCalculateOutputBufferSize,
									 &propertySize,
									 &AvailableOutputBytes);
	
	if(err != noErr)
	{
		ms_error("AudioConverterGetProperty kAudioConverterPropertyCalculateOutputBufferSize %d", err);
		return err;
	}
	
	if (AvailableOutputBytes>d->fMSBuffer->mBuffers[0].mDataByteSize)
	{	
		DestroyAudioBufferList(d->fMSBuffer);
		d->fMSBuffer = AllocateAudioBufferList(d->stereo ? 2 : 1,
											   AvailableOutputBytes);
	}
	
	UInt32 ActualOutputFrames = AvailableOutputBytes / ((d->bits / 8) * 1) / d->caInASBD.mChannelsPerFrame;
	err = AudioConverterFillComplexBuffer (d->caInConverter,
										   (AudioConverterComplexInputDataProc)(readACInputProc),
										   inRefCon,
										   &ActualOutputFrames,
										   d->fMSBuffer,
										   NULL);
	if(err != noErr)
	{
		ms_error("readRenderProc:AudioConverterFillComplexBuffer %d", err);
		return err;
	}
	
	mblk_t *rm=NULL;
	rm=allocb(ActualOutputFrames*2,0);
	memcpy(rm->b_wptr, d->fMSBuffer->mBuffers[0].mData, ActualOutputFrames*2);
	rm->b_wptr+=ActualOutputFrames*2;
	
	if (gain_volume_in != 1.0f)
	{
		int16_t *ptr=(int16_t *)rm->b_rptr;
		for (;ptr<(int16_t *)rm->b_wptr;ptr++)
		{
			*ptr=(int16_t)(((float)(*ptr))*gain_volume_in);
		}
	}
	
	ms_mutex_lock(&d->mutex);
	putq(&d->rq,rm);
	ms_mutex_unlock(&d->mutex);
	rm=NULL;
	
	return err;
}
Example #19
OSStatus DoConvertFile(CFURLRef sourceURL, CFURLRef destinationURL, OSType outputFormat, Float64 outputSampleRate, UInt32 outputBitRate)
{
	AudioFileID         sourceFileID = 0;
    AudioFileID         destinationFileID = 0;
    AudioConverterRef   converter = NULL;
    Boolean             canResumeFromInterruption = true; // we can continue unless told otherwise
    
    CAStreamBasicDescription srcFormat, dstFormat;
    AudioFileIO afio = {};
    
    char                         *outputBuffer = NULL;
    AudioStreamPacketDescription *outputPacketDescriptions = NULL;
    
    OSStatus error = noErr;
    
    // in this sample we should never be on the main thread here
    assert(![NSThread isMainThread]);
    
    // transition thread state to kStateRunning before continuing
    
    printf("\nDoConvertFile\n");
    
    try {
        // get the source file
        XThrowIfError(AudioFileOpenURL(sourceURL, kAudioFileReadPermission, 0, &sourceFileID), "AudioFileOpenURL failed");
	
        // get the source data format
        UInt32 size = sizeof(srcFormat);
        XThrowIfError(AudioFileGetProperty(sourceFileID, kAudioFilePropertyDataFormat, &size, &srcFormat), "couldn't get source data format");
        
        // setup the output file format
        dstFormat.mSampleRate = (outputSampleRate == 0 ? srcFormat.mSampleRate : outputSampleRate); // set sample rate
        if (outputFormat == kAudioFormatLinearPCM) {
            // if the output format is PCM, create a 16-bit int PCM file format description as an example
            dstFormat.mFormatID = outputFormat;
            dstFormat.mChannelsPerFrame = srcFormat.NumberChannels();
            dstFormat.mBitsPerChannel = 16;
            dstFormat.mBytesPerPacket = dstFormat.mBytesPerFrame = 2 * dstFormat.mChannelsPerFrame;
            dstFormat.mFramesPerPacket = 1;
            dstFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger; // little-endian
        } else {
            // compressed format - need to set at least format, sample rate and channel fields for kAudioFormatProperty_FormatInfo
            dstFormat.mFormatID = outputFormat;
            dstFormat.mChannelsPerFrame =  (outputFormat == kAudioFormatiLBC ? 1 : srcFormat.NumberChannels()); // for iLBC num channels must be 1
            
            // use AudioFormat API to fill out the rest of the description
            size = sizeof(dstFormat);
            XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &size, &dstFormat), "couldn't create destination data format");
        }
        
        printf("Source File format: "); srcFormat.Print();
        printf("Destination format: "); dstFormat.Print();
	
        // create the AudioConverter
        
        XThrowIfError(AudioConverterNew(&srcFormat, &dstFormat, &converter), "AudioConverterNew failed!");
    
        // if the source has a cookie, get it and set it on the Audio Converter
        ReadCookie(sourceFileID, converter);

        // get the actual formats back from the Audio Converter
        size = sizeof(srcFormat);
        XThrowIfError(AudioConverterGetProperty(converter, kAudioConverterCurrentInputStreamDescription, &size, &srcFormat), "AudioConverterGetProperty kAudioConverterCurrentInputStreamDescription failed!");

        size = sizeof(dstFormat);
        XThrowIfError(AudioConverterGetProperty(converter, kAudioConverterCurrentOutputStreamDescription, &size, &dstFormat), "AudioConverterGetProperty kAudioConverterCurrentOutputStreamDescription failed!");

        printf("Formats returned from AudioConverter:\n");
        printf("              Source format: "); srcFormat.Print();
        printf("    Destination File format: "); dstFormat.Print();
        
        // if encoding to AAC set the bitrate
        // kAudioConverterEncodeBitRate is a UInt32 value containing the number of bits per second to aim for when encoding data
        // when you explicitly set the bit rate and the sample rate, this tells the encoder to stick with both bit rate and sample rate
        //     but there are combinations (also depending on the number of channels) which will not be allowed
        // if you do not explicitly set a bit rate the encoder will pick the correct value for you depending on samplerate and number of channels
        // bit rate also scales with the number of channels, therefore one bit rate per sample rate can be used for mono cases
        //    and if you have stereo or more, you can multiply that number by the number of channels.
        
        if (outputBitRate == 0) {
            outputBitRate = 192000; // 192 kbps
        }
        
        if (dstFormat.mFormatID == kAudioFormatMPEG4AAC) {
            
            UInt32 propSize = sizeof(outputBitRate);
            
            // set the bit rate depending on the samplerate chosen
            XThrowIfError(AudioConverterSetProperty(converter, kAudioConverterEncodeBitRate, propSize, &outputBitRate),
                           "AudioConverterSetProperty kAudioConverterEncodeBitRate failed!");
            
            // get it back and print it out
            AudioConverterGetProperty(converter, kAudioConverterEncodeBitRate, &propSize, &outputBitRate);
            printf ("AAC Encode Bitrate: %u\n", (unsigned int)outputBitRate);
        }

        // can the Audio Converter resume conversion after an interruption?
        // this property may be queried at any time after construction of the Audio Converter after setting its output format
        // there's no clear reason to prefer construction time, interruption time, or potential resumption time but we prefer
        // construction time since it means less code to execute during or after interruption time
        UInt32 canResume = 0;
        size = sizeof(canResume);
        error = AudioConverterGetProperty(converter, kAudioConverterPropertyCanResumeFromInterruption, &size, &canResume);
        if (noErr == error) {
            // we received a valid return value from the GetProperty call
            // if the property's value is 1, then the codec CAN resume work following an interruption
            // if the property's value is 0, then interruptions destroy the codec's state and we're done
            
            if (0 == canResume) canResumeFromInterruption = false;
            
            printf("Audio Converter %s continue after interruption!\n", (canResumeFromInterruption == 0 ? "CANNOT" : "CAN"));
        } else {
            // if the property is unimplemented (kAudioConverterErr_PropertyNotSupported, or paramErr returned in the case of PCM),
            // then the codec being used is not a hardware codec so we're not concerned about codec state
            // we are always going to be able to resume conversion after an interruption
            
            if (kAudioConverterErr_PropertyNotSupported == error) {
                printf("kAudioConverterPropertyCanResumeFromInterruption property not supported - see comments in source for more info.\n");
            } else {
                printf("AudioConverterGetProperty kAudioConverterPropertyCanResumeFromInterruption result %ld, paramErr is OK if PCM\n", error);
            }
            
            error = noErr;
        }
        
        // create the destination file 
        XThrowIfError(AudioFileCreateWithURL(destinationURL, kAudioFileCAFType, &dstFormat, kAudioFileFlags_EraseFile, &destinationFileID), "AudioFileCreateWithURL failed!");

        // set up source buffers and data proc info struct
        afio.srcFileID = sourceFileID;
        afio.srcBufferSize = 32768;
        afio.srcBuffer = new char [afio.srcBufferSize];
        afio.srcFilePos = 0;
        afio.srcFormat = srcFormat;
		
        if (srcFormat.mBytesPerPacket == 0) {
            // if the source format is VBR, we need to get the maximum packet size
            // use kAudioFilePropertyPacketSizeUpperBound which returns the theoretical maximum packet size
            // in the file (without actually scanning the whole file to find the largest packet,
            // as may happen with kAudioFilePropertyMaximumPacketSize)
            size = sizeof(afio.srcSizePerPacket);
            XThrowIfError(AudioFileGetProperty(sourceFileID, kAudioFilePropertyPacketSizeUpperBound, &size, &afio.srcSizePerPacket), "AudioFileGetProperty kAudioFilePropertyPacketSizeUpperBound failed!");
            
            // how many packets can we read for our buffer size?
            afio.numPacketsPerRead = afio.srcBufferSize / afio.srcSizePerPacket;
            
            // allocate memory for the PacketDescription structures describing the layout of each packet
            afio.packetDescriptions = new AudioStreamPacketDescription [afio.numPacketsPerRead];
        } else {
            // CBR source format
            afio.srcSizePerPacket = srcFormat.mBytesPerPacket;
            afio.numPacketsPerRead = afio.srcBufferSize / afio.srcSizePerPacket;
            afio.packetDescriptions = NULL;
        }

        // set up output buffers
        UInt32 outputSizePerPacket = dstFormat.mBytesPerPacket; // this will be non-zero if the format is CBR
        UInt32 theOutputBufSize = 32768;
        outputBuffer = new char[theOutputBufSize];
        
        if (outputSizePerPacket == 0) {
            // if the destination format is VBR, we need to get max size per packet from the converter
            size = sizeof(outputSizePerPacket);
            XThrowIfError(AudioConverterGetProperty(converter, kAudioConverterPropertyMaximumOutputPacketSize, &size, &outputSizePerPacket), "AudioConverterGetProperty kAudioConverterPropertyMaximumOutputPacketSize failed!");
            
            // allocate memory for the PacketDescription structures describing the layout of each packet
            outputPacketDescriptions = new AudioStreamPacketDescription [theOutputBufSize / outputSizePerPacket];
        }
        UInt32 numOutputPackets = theOutputBufSize / outputSizePerPacket;

        // if the destination format has a cookie, get it and set it on the output file
        WriteCookie(converter, destinationFileID);

        // write destination channel layout
        if (srcFormat.mChannelsPerFrame > 2) {
            WriteDestinationChannelLayout(converter, sourceFileID, destinationFileID);
        }

        UInt64 totalOutputFrames = 0; // used for debugging printf
        SInt64 outputFilePos = 0;
        
        // loop to convert data
        printf("Converting...\n");
        while (1) {

            // set up output buffer list
            AudioBufferList fillBufList;
            fillBufList.mNumberBuffers = 1;
            fillBufList.mBuffers[0].mNumberChannels = dstFormat.mChannelsPerFrame;
            fillBufList.mBuffers[0].mDataByteSize = theOutputBufSize;
            fillBufList.mBuffers[0].mData = outputBuffer;
            
            // this will block if we're interrupted
            Boolean wasInterrupted = NO;
            
            if ((error || wasInterrupted) && (false == canResumeFromInterruption)) {
                // this is our interruption termination condition
                // an interruption has occurred but the Audio Converter cannot continue
                error = kMyAudioConverterErr_CannotResumeFromInterruptionError;
                break;
            }

            // convert data
            UInt32 ioOutputDataPackets = numOutputPackets;
            printf("AudioConverterFillComplexBuffer...\n");
            error = AudioConverterFillComplexBuffer(converter, EncoderDataProc, &afio, &ioOutputDataPackets, &fillBufList, outputPacketDescriptions);
            // if interrupted in the process of the conversion call, we must handle the error appropriately
            if (error) {
                if (kAudioConverterErr_HardwareInUse == error) {
                     printf("Audio Converter returned kAudioConverterErr_HardwareInUse!\n");
                } else {
                    XThrowIfError(error, "AudioConverterFillComplexBuffer error!");
                }
            } else {
                if (ioOutputDataPackets == 0) {
                    // this is the EOF condition
                    error = noErr;
                    break;
                }
            }
            
            if (noErr == error) {
                // write to output file
                UInt32 inNumBytes = fillBufList.mBuffers[0].mDataByteSize;
                XThrowIfError(AudioFileWritePackets(destinationFileID, false, inNumBytes, outputPacketDescriptions, outputFilePos, &ioOutputDataPackets, outputBuffer), "AudioFileWritePackets failed!");
            
                printf("Convert Output: Write %lu packets at position %lld, size: %ld\n", ioOutputDataPackets, outputFilePos, inNumBytes);
                
                // advance output file packet position
                outputFilePos += ioOutputDataPackets;

                if (dstFormat.mFramesPerPacket) { 
                    // the format has constant frames per packet
                    totalOutputFrames += (ioOutputDataPackets * dstFormat.mFramesPerPacket);
                } else if (outputPacketDescriptions != NULL) {
                    // variable frames per packet require doing this for each packet (adding up the number of sample frames of data in each packet)
                    for (UInt32 i = 0; i < ioOutputDataPackets; ++i)
                        totalOutputFrames += outputPacketDescriptions[i].mVariableFramesInPacket;
                }
            }
        } // while

        if (noErr == error) {
            // write out any of the leading and trailing frames for compressed formats only
            if (dstFormat.mBitsPerChannel == 0) {
                // the total frame count we wrote should agree with the file's packet table info
                printf("Total number of output frames counted: %lld\n", totalOutputFrames); 
                WritePacketTableInfo(converter, destinationFileID);
            }
        
            // write the cookie again - sometimes codecs will update cookies at the end of a conversion
            WriteCookie(converter, destinationFileID);
        }
    }
    catch (CAXException e) {
		char buf[256];
		fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
        error = e.mError;
	}
    
    // cleanup
    if (converter) AudioConverterDispose(converter);
    if (destinationFileID) AudioFileClose(destinationFileID);
	if (sourceFileID) AudioFileClose(sourceFileID);
    
    if (afio.srcBuffer) delete [] afio.srcBuffer;
    if (afio.packetDescriptions) delete [] afio.packetDescriptions;
    if (outputBuffer) delete [] outputBuffer;
    if (outputPacketDescriptions) delete [] outputPacketDescriptions;
    
    
    return error;
}
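EncoderDataProc is not listed with this example; it pulls packets from the source file using the AudioFileIO state set up above and advances afio.srcFilePos. A hedged sketch against those fields:

// Sketch of the converter's input proc for the file-to-file conversion.
static OSStatus EncoderDataProc(AudioConverterRef inAudioConverter,
                                UInt32 *ioNumberDataPackets,
                                AudioBufferList *ioData,
                                AudioStreamPacketDescription **outDataPacketDescription,
                                void *inUserData)
{
    AudioFileIO *afio = (AudioFileIO *)inUserData;

    // never hand out more packets than the read buffer can hold
    if (*ioNumberDataPackets > afio->numPacketsPerRead)
        *ioNumberDataPackets = afio->numPacketsPerRead;

    UInt32 outNumBytes = afio->srcBufferSize;
    OSStatus error = AudioFileReadPacketData(afio->srcFileID, false, &outNumBytes,
                                             afio->packetDescriptions, afio->srcFilePos,
                                             ioNumberDataPackets, afio->srcBuffer);
    if (error) {
        *ioNumberDataPackets = 0;
        return error;
    }

    afio->srcFilePos += *ioNumberDataPackets;

    ioData->mNumberBuffers = 1;
    ioData->mBuffers[0].mData = afio->srcBuffer;
    ioData->mBuffers[0].mDataByteSize = outNumBytes;
    ioData->mBuffers[0].mNumberChannels = afio->srcFormat.mChannelsPerFrame;

    if (outDataPacketDescription)
        *outDataPacketDescription = afio->packetDescriptions;

    return noErr;
}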
Example #20
    std::shared_ptr<Buffer>
    AudioMixer::resample(const uint8_t* const buffer,
                         size_t size,
                         AudioBufferMetadata& metadata)
    {
        const auto inFrequncyInHz = metadata.getData<kAudioMetadataFrequencyInHz>();
        const auto inBitsPerChannel = metadata.getData<kAudioMetadataBitsPerChannel>();
        const auto inChannelCount = metadata.getData<kAudioMetadataChannelCount>();
        const auto inFlags = metadata.getData<kAudioMetadataFlags>();
        const auto inBytesPerFrame = metadata.getData<kAudioMetadataBytesPerFrame>();
        const auto inNumberFrames = metadata.getData<kAudioMetadataNumberFrames>();
        const auto inUsesOSStruct = metadata.getData<kAudioMetadataUsesOSStruct>();
        
        if(m_outFrequencyInHz == inFrequncyInHz &&
           m_outBitsPerChannel == inBitsPerChannel &&
           m_outChannelCount == inChannelCount
           && !(inFlags & kAudioFormatFlagIsNonInterleaved)
           && !(inFlags & kAudioFormatFlagIsFloat))
        {
            // No resampling necessary
            return std::make_shared<Buffer>();
        }
        
        uint64_t hash = uint64_t(inBytesPerFrame&0xFF) << 56 | uint64_t(inFlags&0xFF) << 48 | uint64_t(inChannelCount&0xFF) << 40
                        | uint64_t(inBitsPerChannel&0xFF) << 32 | inFrequncyInHz;
        
        auto it = m_converters.find(hash);
        ConverterInst converter = {0};
        
        if(it == m_converters.end()) {
            AudioStreamBasicDescription in = {0};
            AudioStreamBasicDescription out = {0};
            
            in.mFormatID = kAudioFormatLinearPCM;
            in.mFormatFlags =  inFlags;
            in.mChannelsPerFrame = inChannelCount;
            in.mSampleRate = inFrequncyInHz;
            in.mBitsPerChannel = inBitsPerChannel;
            in.mBytesPerFrame = inBytesPerFrame;
            in.mFramesPerPacket = 1;
            in.mBytesPerPacket = in.mBytesPerFrame * in.mFramesPerPacket;
            
            out.mFormatID = kAudioFormatLinearPCM;
            out.mFormatFlags =  kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
            out.mChannelsPerFrame = m_outChannelCount;
            out.mSampleRate = m_outFrequencyInHz;
            out.mBitsPerChannel = m_outBitsPerChannel;
            out.mBytesPerFrame = (out.mBitsPerChannel * out.mChannelsPerFrame) / 8;
            out.mFramesPerPacket = 1;
            out.mBytesPerPacket = out.mBytesPerFrame * out.mFramesPerPacket;
            
            converter.asbdIn = in;
            converter.asbdOut = out;
            
            OSStatus ret = AudioConverterNew(&in, &out, &converter.converter);
            
            AudioConverterSetProperty(converter.converter,
                                      kAudioConverterSampleRateConverterComplexity,
                                      sizeof(s_samplingRateConverterComplexity),
                                      &s_samplingRateConverterComplexity);
            
            AudioConverterSetProperty(converter.converter,
                                      kAudioConverterSampleRateConverterQuality,
                                      sizeof(s_samplingRateConverterQuality),
                                      &s_samplingRateConverterQuality);
        
            auto prime = kConverterPrimeMethod_None;
            
            AudioConverterSetProperty(converter.converter,
                                      kAudioConverterPrimeMethod,
                                      sizeof(prime),
                                      &prime);
            
            m_converters[hash] = converter;
            
            if(ret != noErr) {
                DLog("ret = %d (%x)", (int)ret, (unsigned)ret);
            }
            
        } else {
            converter = it->second;
        }
        
        auto & in = converter.asbdIn;
        auto & out = converter.asbdOut;

        const double inSampleCount = inNumberFrames;
        const double ratio = static_cast<double>(inFrequncyInHz) / static_cast<double>(m_outFrequencyInHz);
        
        const double outBufferSampleCount = std::round(double(inSampleCount) / ratio);
        
        const size_t outBufferSize = out.mBytesPerPacket * outBufferSampleCount;
        const auto outBuffer = std::make_shared<Buffer>(outBufferSize);
        
        
        std::unique_ptr<UserData> ud(new UserData());
        ud->size = static_cast<int>(size);
        ud->data = const_cast<uint8_t*>(buffer);
        ud->p = inUsesOSStruct ? 0 : ud->data;
        ud->packetSize = in.mBytesPerPacket;
        ud->numberPackets = inSampleCount;
        ud->numChannels = inChannelCount;
        ud->isInterleaved = !(inFlags & kAudioFormatFlagIsNonInterleaved);
        ud->usesOSStruct = inUsesOSStruct;
        
        AudioBufferList outBufferList;
        outBufferList.mNumberBuffers = 1;
        outBufferList.mBuffers[0].mDataByteSize = static_cast<UInt32>(outBufferSize);
        outBufferList.mBuffers[0].mNumberChannels = m_outChannelCount;
        outBufferList.mBuffers[0].mData = (*outBuffer)();
        
        UInt32 sampleCount = outBufferSampleCount;
        OSStatus ret = AudioConverterFillComplexBuffer(converter.converter, /* AudioConverterRef inAudioConverter */
                                        AudioMixer::ioProc, /* AudioConverterComplexInputDataProc inInputDataProc */
                                        ud.get(), /* void *inInputDataProcUserData */
                                        &sampleCount, /* UInt32 *ioOutputDataPacketSize */
                                        &outBufferList, /* AudioBufferList *outOutputData */
                                        NULL /* AudioStreamPacketDescription *outPacketDescription */
                                        );
        if(ret != noErr) {
            DLog("ret = %d (%x)", (int)ret, (unsigned)ret);
        }
      
        outBuffer->setSize(outBufferList.mBuffers[0].mDataByteSize);
        return outBuffer;
    }
Example #21
    /* Encode */
    output->size = 0;
#if defined(PJMEDIA_ILBC_CODEC_USE_COREAUDIO)&& PJMEDIA_ILBC_CODEC_USE_COREAUDIO
    npackets = nsamples / ilbc_codec->enc_samples_per_frame;

    theABL.mNumberBuffers = 1;
    theABL.mBuffers[0].mNumberChannels = 1;
    theABL.mBuffers[0].mDataByteSize = output_buf_len;
    theABL.mBuffers[0].mData = output->buf;

    ilbc_codec->enc_total_packets = npackets;
    ilbc_codec->enc_buffer = (char *)input->buf;
    ilbc_codec->enc_buffer_offset = 0;

    err = AudioConverterFillComplexBuffer(ilbc_codec->enc, encodeDataProc,
					  ilbc_codec, &npackets,
					  &theABL, NULL);
    if (err == noErr) {
	output->size = npackets * ilbc_codec->enc_frame_size;
    }
#else
    while (nsamples >= ilbc_codec->enc_samples_per_frame) {
	unsigned i;
	
	/* Convert to float */
	for (i=0; i<ilbc_codec->enc_samples_per_frame; ++i) {
	    ilbc_codec->enc_block[i] = (float) (*pcm_in++);
	}

	iLBC_encode((unsigned char *)output->buf + output->size, 
		    ilbc_codec->enc_block, 
Example #22
static OSStatus gviHardwareCaptureIOProc(AudioDeviceID inDevice,
                                         const AudioTimeStamp * inNow,
                                         const AudioBufferList * inInputData, 
                                         const AudioTimeStamp * inInputTime,
                                         AudioBufferList * outOutputData, 
                                         const AudioTimeStamp * inOutputTime,
                                         void * inClientData)
{
	GVIDevice * device = (GVIDevice *)inClientData;
	GVIHardwareData * data = (GVIHardwareData *)device->m_data;
	OSStatus result;
	UInt32 size;
	GVICaptureConverterData converterData;
	AudioBufferList bufferList;
	GVICapturedFrame * frame;

	// get a lock on the device
	if(!gviLockDevice(data))
		return (OSStatus)1;

	// make sure we are capturing
	if(data->m_capturing)
	{
		// setup the buffer list
		bufferList.mNumberBuffers = 1;
		bufferList.mBuffers[0].mNumberChannels = 1;
		bufferList.mBuffers[0].mDataByteSize = GVIBytesPerFrame;
		bufferList.mBuffers[0].mData = data->m_captureBuffer;

		// setup the converter data struct
		converterData.m_device = device;
		converterData.m_capturedAudio = (AudioBufferList *)inInputData;
		converterData.m_used = GVFalse;

		// loop while it is converting data
		do
		{
			// request one frame
			size = GVISamplesPerFrame;

			// convert the captured data into our format
			result = AudioConverterFillComplexBuffer(data->m_captureConverter, gviAudioConverterCaptureProc, &converterData, &size, &bufferList, NULL);

			// was there enough to fill a buffer
			if(result == noErr)
			{
				// get a frame
				frame = gviPopFirstFrame(&data->m_captureAvailableFrames);

				// if there aren't any available frames, repurpose the oldest captured frame
				if(!frame)
				{
					frame = gviPopFirstFrame(&data->m_capturedFrames);
					assert(frame);
					if(!frame)
						break;
				}

				// setup the frame
				memcpy(frame->m_frame, data->m_captureBuffer, GVIBytesPerFrame);
				frame->m_frameStamp = data->m_captureClock;

				// increment the capture clock
				data->m_captureClock++;

				// add this frame to the end of the capture list
				gviPushLastFrame(&data->m_capturedFrames, frame);
			}
		}
		while(result == noErr);
	}

	// release the device lock
	gviUnlockDevice(data);

	return noErr;
}
Example #23
void Convert(MyAudioConverterSettings *mySettings)
{
	// create audioConverter object
	AudioConverterRef	audioConverter;
    CheckResult (AudioConverterNew(&mySettings->inputFormat, &mySettings->outputFormat, &audioConverter),
				 "AudioConveterNew failed");
	
	// allocate packet descriptions if the input file is VBR
	UInt32 packetsPerBuffer = 0;
	UInt32 outputBufferSize = 32 * 1024; // 32 KB is a good starting point
	UInt32 sizePerPacket = mySettings->inputFormat.mBytesPerPacket;	
	if (sizePerPacket == 0)
	{
		UInt32 size = sizeof(sizePerPacket);
        CheckResult(AudioConverterGetProperty(audioConverter, kAudioConverterPropertyMaximumOutputPacketSize, &size, &sizePerPacket),
					"Couldn't get kAudioConverterPropertyMaximumOutputPacketSize");
		
        // make sure the buffer is large enough to hold at least one packet
		if (sizePerPacket > outputBufferSize)
			outputBufferSize = sizePerPacket;
		
		packetsPerBuffer = outputBufferSize / sizePerPacket;
		mySettings->inputFilePacketDescriptions = (AudioStreamPacketDescription*)malloc(sizeof(AudioStreamPacketDescription) * packetsPerBuffer);
		
	}
	else
	{
		packetsPerBuffer = outputBufferSize / sizePerPacket;
	}
	
	// allocate destination buffer
	UInt8 *outputBuffer = (UInt8 *)malloc(sizeof(UInt8) * outputBufferSize); // CHRIS: not sizeof(UInt8*). check book text!
	
	UInt32 outputFilePacketPosition = 0; //in bytes
	while(1)
	{
		// wrap the destination buffer in an AudioBufferList
		AudioBufferList convertedData;
		convertedData.mNumberBuffers = 1;
		convertedData.mBuffers[0].mNumberChannels = mySettings->inputFormat.mChannelsPerFrame;
		convertedData.mBuffers[0].mDataByteSize = outputBufferSize;
		convertedData.mBuffers[0].mData = outputBuffer;
		
		// now call the audioConverter to transcode the data. This function will call
		// the callback function as many times as required to fulfill the request.
		UInt32 ioOutputDataPackets = packetsPerBuffer;
		OSStatus error = AudioConverterFillComplexBuffer(audioConverter, 
														 MyAudioConverterCallback, 
														 mySettings, 
														 &ioOutputDataPackets, 
														 &convertedData, 
														 (mySettings->inputFilePacketDescriptions ? mySettings->inputFilePacketDescriptions : nil));
		if (error || !ioOutputDataPackets)
		{
			//		fprintf(stderr, "err: %ld, packets: %ld\n", err, ioOutputDataPackets);
			break;	// this is our termination condition
		}
		
		// write the converted data to the output file
		// KEVIN: QUESTION: 3rd arg seems like it should be a byte count, not packets. why does this work?
		CheckResult (AudioFileWritePackets(mySettings->outputFile,
										   FALSE,
										   ioOutputDataPackets,
										   NULL,
										   outputFilePacketPosition / mySettings->outputFormat.mBytesPerPacket, 
										   &ioOutputDataPackets,
										   convertedData.mBuffers[0].mData),
					 "Couldn't write packets to file");
		
		// advance the output file write location
		outputFilePacketPosition += (ioOutputDataPackets * mySettings->outputFormat.mBytesPerPacket);
	}
	
	AudioConverterDispose(audioConverter);
}
Example #24
int adin_mic_read(void *buffer, int nsamples) {
    OSStatus status;

#ifdef DEBUG
    jlog("Stat: adin_darwin: read: %d samples required\n", nsamples);
#endif

    if (!CoreAudioHasInputDevice)
        return -1;

    if (!CoreAudioRecordStarted) {
        status = AudioOutputUnitStart(InputUnit);
        CoreAudioRecordStarted = TRUE;
    }

    UInt32 capacity = BUF_SAMPLES * OutputSamplesPerPacket;
    UInt32 npackets = nsamples * OutputSamplesPerPacket;

    UInt32 numDataPacketsNeeded;

    Sample* inputDataBuf = (Sample*)(BufListConverted->mBuffers[0].mData);

    numDataPacketsNeeded = npackets < capacity ? npackets : capacity;

#ifdef DEBUG
    jlog("Stat: adin_darwin: numDataPacketsNeeded=%d\n", numDataPacketsNeeded);
#endif

    status = AudioConverterFillComplexBuffer(Converter,
             ConvInputProc,
             NULL, // user data
             &numDataPacketsNeeded,
             BufListConverted,
             NULL // packet description
                                            );
    if (status != noErr) {
        jlog("Error: adin_darwin: AudioConverterFillComplexBuffer: failed\n");
        return -1;
    }

#ifdef DEBUG
    jlog("Stat: adin_darwin: %d bytes filled (BufListConverted)\n",
         BufListConverted->mBuffers[0].mDataByteSize);
#endif

    int providedSamples = numDataPacketsNeeded / OutputSamplesPerPacket;

    pthread_mutex_lock(&MutexInput);

#ifdef DEBUG
    jlog("Stat: adin_darwin: provided samples: %d\n", providedSamples);
#endif

    Sample* dst_data = (Sample*)buffer;

    int i;

    int count = 0;

    for (i = 0; i < providedSamples; i++) {
        dst_data[i] = inputDataBuf[i];
        if (dst_data[i] == 0) count++;
    }

    //jlog("Stat: adin_darwin: %d zero samples\n", count);


    pthread_mutex_unlock(&MutexInput);

#ifdef DEBUG
    jlog("Stat: adindarwin: EXIT: %d samples provided\n", providedSamples);
#endif

    return providedSamples;
}
Example #25
OSStatus FeedSound(void *ref,AudioUnitRenderActionFlags *flags,const AudioTimeStamp *time,UInt32 bus,UInt32 count,AudioBufferList *blist){
	coreaudio	*audio;
	audio=(coreaudio*)ref;
	return AudioConverterFillComplexBuffer(audio->conv,Feed,ref,&count,blist,0);
}
Example #26
// _______________________________________________________________________________________
//
void	CAAudioFile::Read(UInt32 &ioNumPackets, AudioBufferList *ioData)
			// May read fewer packets than requested if:
			//		buffer is not big enough
			//		file does not contain that many more packets
			// Note that eofErr is not fatal, just results in 0 packets returned
			// ioData's buffer sizes may be shortened
{
	XThrowIf(mClientMaxPacketSize == 0, kExtAudioFileError_MaxPacketSizeUnknown, "client maximum packet size is 0");
	if (mIOBufferList.mBuffers[0].mData == NULL) {
#if DEBUG
		printf("warning: CAAudioFile::AllocateBuffers called from ReadPackets\n");
#endif
		AllocateBuffers();
	}
	UInt32 bufferSizeBytes = ioData->mBuffers[0].mDataByteSize;
	UInt32 maxNumPackets = bufferSizeBytes / mClientMaxPacketSize;
	// older versions of AudioConverterFillComplexBuffer don't do this, so do our own sanity check
	UInt32 nPackets = std::min(ioNumPackets, maxNumPackets);

	mMaxPacketsToRead = ~0UL;

	if (mClientDataFormat.mFramesPerPacket == 1) {  // PCM or equivalent
		while (mFramesToSkipFollowingSeek > 0) {
			UInt32 skipFrames = std::min(mFramesToSkipFollowingSeek, maxNumPackets);
			UInt32 framesPerPacket;
			if ((framesPerPacket=mFileDataFormat.mFramesPerPacket) > 0)
				mMaxPacketsToRead = (skipFrames + framesPerPacket - 1) / framesPerPacket;

			if (mConverter == NULL) {
				XThrowIfError(ReadInputProc(NULL, &skipFrames, ioData, NULL, this), "read audio file");
			} else {
#if CAAUDIOFILE_PROFILE
				mInConverter = true;
#endif
				StartTiming(this, fill);
				XThrowIfError(AudioConverterFillComplexBuffer(mConverter, ReadInputProc, this, &skipFrames, ioData, NULL), "convert audio packets (pcm read)");
				ElapsedTime(this, fill, mTicksInConverter);
#if CAAUDIOFILE_PROFILE
				mInConverter = false;
#endif
			}
			if (skipFrames == 0) {	// hit EOF
				ioNumPackets = 0;
				return;
			}
			mFrameMark += skipFrames;
#if VERBOSE_IO
			printf("CAAudioFile::ReadPackets: skipped %ld frames\n", skipFrames);
#endif

			mFramesToSkipFollowingSeek -= skipFrames;

			// restore mDataByteSize
			for (int i = ioData->mNumberBuffers; --i >= 0 ; )
				ioData->mBuffers[i].mDataByteSize = bufferSizeBytes;
		}
	}

	if (mFileDataFormat.mFramesPerPacket > 0)
		// don't read more packets than we are being asked to produce
		mMaxPacketsToRead = nPackets / mFileDataFormat.mFramesPerPacket + 1;
	if (mConverter == NULL) {
		XThrowIfError(ReadInputProc(NULL, &nPackets, ioData, NULL, this), "read audio file");
	} else {
#if CAAUDIOFILE_PROFILE
		mInConverter = true;
#endif
		StartTiming(this, fill);
		XThrowIfError(AudioConverterFillComplexBuffer(mConverter, ReadInputProc, this, &nPackets, ioData, NULL), "convert audio packets (read)");
		ElapsedTime(this, fill, mTicksInConverter);
#if CAAUDIOFILE_PROFILE
		mInConverter = false;
#endif
	}
	if (mClientDataFormat.mFramesPerPacket == 1)
		mFrameMark += nPackets;

	ioNumPackets = nPackets;
}
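A caller typically drains the file by looping on Read until it returns zero packets. A minimal sketch, assuming clientMaxPacketSize and clientChannels were obtained from the file's client data format (both names are placeholders, not part of the CAAudioFile API shown above):

#include <vector>

void DrainFile(CAAudioFile &file, UInt32 clientMaxPacketSize, UInt32 clientChannels)
{
	const UInt32 kPacketsPerRead = 4096;
	std::vector<UInt8> buf(kPacketsPerRead * clientMaxPacketSize);

	for (;;) {
		AudioBufferList abl;
		abl.mNumberBuffers = 1;
		abl.mBuffers[0].mNumberChannels = clientChannels;
		abl.mBuffers[0].mDataByteSize = (UInt32)buf.size();
		abl.mBuffers[0].mData = &buf[0];

		UInt32 nPackets = kPacketsPerRead;
		file.Read(nPackets, &abl);	// may shorten abl's mDataByteSize
		if (nPackets == 0)
			break;			// EOF; eofErr is not fatal, see the note above

		// ... consume abl.mBuffers[0].mData here ...
	}
}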
Example #27
/* This is called by audio file stream parser when it finds packets of audio */
void Audio_Stream::streamDataCallback(void *inClientData, UInt32 inNumberBytes, UInt32 inNumberPackets, const void *inInputData, AudioStreamPacketDescription *inPacketDescriptions)
{    
    AS_TRACE("%s: inNumberBytes %u, inNumberPackets %u\n", __FUNCTION__, inNumberBytes, inNumberPackets);
    Audio_Stream *THIS = static_cast<Audio_Stream*>(inClientData);
    
    if (!THIS->m_audioStreamParserRunning) {
        AS_TRACE("%s: stray callback detected!\n", __PRETTY_FUNCTION__);
        return;
    }
    
    for (UInt32 i = 0; i < inNumberPackets; i++) {
        /* Allocate the packet */
        UInt32 size = inPacketDescriptions[i].mDataByteSize;
        queued_packet_t *packet = (queued_packet_t *)malloc(sizeof(queued_packet_t) + size);
        
        if (THIS->m_bitrateBufferIndex < kAudioStreamBitrateBufferSize) {
            // Only keep sampling for one buffer cycle; this keeps derived figures
            // such as the duration estimate stable.
            
            THIS->m_bitrateBuffer[THIS->m_bitrateBufferIndex++] = 8 * inPacketDescriptions[i].mDataByteSize / THIS->m_packetDuration;
        }
        
        
        /* Prepare the packet */
        packet->next = NULL;
        packet->desc = inPacketDescriptions[i];
        packet->desc.mStartOffset = 0;
        memcpy(packet->data, (const char *)inInputData + inPacketDescriptions[i].mStartOffset,
               size);
        
        if (THIS->m_queuedHead == NULL) {
            THIS->m_queuedHead = THIS->m_queuedTail = packet;
        } else {
            THIS->m_queuedTail->next = packet;
            THIS->m_queuedTail = packet;
        }
    }
    
    // Count the queued packets (a linear walk; the queue stays short in practice).
    int count = 0;
    queued_packet_t *cur = THIS->m_queuedHead;
    while (cur) {
        cur = cur->next;
        count++;
    }
    
    Stream_Configuration *config = Stream_Configuration::configuration();
    
    if (count > config->decodeQueueSize) {
        AudioBufferList outputBufferList;
        outputBufferList.mNumberBuffers = 1;
        outputBufferList.mBuffers[0].mNumberChannels = THIS->m_dstFormat.mChannelsPerFrame;
        outputBufferList.mBuffers[0].mDataByteSize = THIS->m_outputBufferSize;
        outputBufferList.mBuffers[0].mData = THIS->m_outputBuffer;
        
        AudioStreamPacketDescription description;
        description.mStartOffset = 0;
        description.mDataByteSize = THIS->m_outputBufferSize;
        description.mVariableFramesInPacket = 0;
        
        UInt32 ioOutputDataPackets = THIS->m_outputBufferSize / THIS->m_dstFormat.mBytesPerPacket;
        
        AS_TRACE("calling AudioConverterFillComplexBuffer\n");
        
        OSStatus err = AudioConverterFillComplexBuffer(THIS->m_audioConverter,
                                                       &encoderDataCallback,
                                                       THIS,
                                                       &ioOutputDataPackets,
                                                       &outputBufferList,
                                                       NULL);
        if (err == noErr) {
            AS_TRACE("%i output bytes available for the audio queue\n", (unsigned int)ioOutputDataPackets);
            
            if (THIS->m_watchdogTimer) {
                AS_TRACE("The stream started to play, canceling the watchdog\n");
                
                CFRunLoopTimerInvalidate(THIS->m_watchdogTimer);
                CFRelease(THIS->m_watchdogTimer);
                THIS->m_watchdogTimer = 0;
            }
            
            THIS->setState(PLAYING);
            
            THIS->audioQueue()->handleAudioPackets(outputBufferList.mBuffers[0].mDataByteSize,
                                                   outputBufferList.mNumberBuffers,
                                                   outputBufferList.mBuffers[0].mData,
                                                   &description);
            
            if (THIS->m_delegate) {
                THIS->m_delegate->samplesAvailable(outputBufferList, description);
            }
            
            for(std::list<queued_packet_t*>::iterator iter = THIS->m_processedPackets.begin();
                iter != THIS->m_processedPackets.end(); iter++) {
                queued_packet_t *cur = *iter;
                free(cur);
            }
            THIS->m_processedPackets.clear();
        } else {
            AS_TRACE("AudioConverterFillComplexBuffer failed, error %i\n", err);
        }
    } else {
        AS_TRACE("Less than %i packets queued, returning...\n", config->decodeQueueSize);
    }
}
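The input proc encoderDataCallback is not part of this excerpt. Judging from the queue built above, one plausible shape is to feed the converter one queued packet per invocation and move fed packets onto m_processedPackets so the code above can free them. Everything below is an assumption, not the original implementation, and it presumes the function is a static member (or friend) of Audio_Stream:

static OSStatus encoderDataCallback(AudioConverterRef inAudioConverter,
                                    UInt32 *ioNumberDataPackets,
                                    AudioBufferList *ioData,
                                    AudioStreamPacketDescription **outDataPacketDescription,
                                    void *inUserData)
{
    Audio_Stream *THIS = static_cast<Audio_Stream*>(inUserData);
    
    queued_packet_t *front = THIS->m_queuedHead;
    if (!front) {
        *ioNumberDataPackets = 0;   // starved: the converter returns what it has
        return noErr;
    }
    
    ioData->mNumberBuffers = 1;
    ioData->mBuffers[0].mData = front->data;
    ioData->mBuffers[0].mDataByteSize = front->desc.mDataByteSize;
    if (outDataPacketDescription) {
        *outDataPacketDescription = &front->desc;
    }
    *ioNumberDataPackets = 1;
    
    // Unlink the packet but keep it alive; it is freed only after the
    // conversion pass completes (see m_processedPackets above).
    THIS->m_queuedHead = front->next;
    THIS->m_processedPackets.push_back(front);
    return noErr;
}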
Example #28
nsresult
AppleATDecoder::DecodeSample(mp4_demuxer::MP4Sample* aSample)
{
  // Array containing the queued decoded audio frames, about to be output.
  nsTArray<AudioDataValue> outputData;
  UInt32 channels = mOutputFormat.mChannelsPerFrame;
  // Pick a multiple of the frame size close to a power of two
  // for efficient allocation.
  const uint32_t MAX_AUDIO_FRAMES = 128;
  const uint32_t maxDecodedSamples = MAX_AUDIO_FRAMES * channels;

  // Descriptions for _decompressed_ audio packets. Ignored.
  nsAutoArrayPtr<AudioStreamPacketDescription>
    packets(new AudioStreamPacketDescription[MAX_AUDIO_FRAMES]);

  // This API insists on having packets spoon-fed to it from a callback.
  // This structure exists only to pass our state.
  PassthroughUserData userData =
    { channels, (UInt32)aSample->size, aSample->data };

  // Decompressed audio buffer
  nsAutoArrayPtr<AudioDataValue> decoded(new AudioDataValue[maxDecodedSamples]);

  do {
    AudioBufferList decBuffer;
    decBuffer.mNumberBuffers = 1;
    decBuffer.mBuffers[0].mNumberChannels = channels;
    decBuffer.mBuffers[0].mDataByteSize =
      maxDecodedSamples * sizeof(AudioDataValue);
    decBuffer.mBuffers[0].mData = decoded.get();

    // in: the max number of packets we can handle from the decoder.
    // out: the number of packets the decoder is actually returning.
    UInt32 numFrames = MAX_AUDIO_FRAMES;

    OSStatus rv = AudioConverterFillComplexBuffer(mConverter,
                                                  _PassthroughInputDataCallback,
                                                  &userData,
                                                  &numFrames /* in/out */,
                                                  &decBuffer,
                                                  packets.get());

    if (rv && rv != kNoMoreDataErr) {
      LOG("Error decoding audio stream: %d\n", rv);
      return NS_ERROR_FAILURE;
    }

    if (numFrames) {
      outputData.AppendElements(decoded.get(), numFrames * channels);
    }

    if (rv == kNoMoreDataErr) {
      break;
    }
  } while (true);

  if (outputData.IsEmpty()) {
    return NS_OK;
  }

  size_t numFrames = outputData.Length() / channels;
  int rate = mOutputFormat.mSampleRate;
  CheckedInt<Microseconds> duration = FramesToUsecs(numFrames, rate);
  if (!duration.isValid()) {
    NS_WARNING("Invalid count of accumulated audio samples");
    return NS_ERROR_FAILURE;
  }

#ifdef LOG_SAMPLE_DECODE
  LOG("pushed audio at time %lfs; duration %lfs\n",
      (double)aSample->composition_timestamp / USECS_PER_S,
      (double)duration.value() / USECS_PER_S);
#endif

  nsAutoArrayPtr<AudioDataValue> data(new AudioDataValue[outputData.Length()]);
  PodCopy(data.get(), &outputData[0], outputData.Length());
  nsRefPtr<AudioData> audio = new AudioData(aSample->byte_offset,
                                            aSample->composition_timestamp,
                                            duration.value(),
                                            numFrames,
                                            data.forget(),
                                            channels,
                                            rate);
  mCallback->Output(audio);
  return NS_OK;
}
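The passthrough input proc referenced above typically hands the converter the whole compressed sample on the first call and reports starvation afterwards. A sketch, assuming the PassthroughUserData fields are named mChannels, mDataSize and mData to match the initializer order in DecodeSample (the real names may differ):

static OSStatus
_PassthroughInputDataCallback(AudioConverterRef aAudioConverter,
                              UInt32 *aNumDataPackets /* in/out */,
                              AudioBufferList *aData,
                              AudioStreamPacketDescription **aPacketDesc,
                              void *aUserData)
{
  PassthroughUserData *userData = static_cast<PassthroughUserData*>(aUserData);

  // Second call: everything was handed over already, signal "no more data".
  if (!userData->mData) {
    return kNoMoreDataErr;   // the custom OSStatus checked in DecodeSample above
  }

  // Hand over the entire sample in one go (field names are assumptions).
  *aNumDataPackets = 1;
  aData->mNumberBuffers = 1;
  aData->mBuffers[0].mNumberChannels = userData->mChannels;
  aData->mBuffers[0].mDataByteSize = userData->mDataSize;
  aData->mBuffers[0].mData = (void*)userData->mData;
  userData->mData = nullptr;   // mark as consumed for the next invocation

  return noErr;
}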
Example #29
/*
 * This callback is called when |AudioFileStreamParseBytes| has enough data to
 * extract one or more MP3 packets.
 */
void
AppleMP3Reader::AudioSampleCallback(UInt32 aNumBytes,
                                    UInt32 aNumPackets,
                                    const void *aData,
                                    AudioStreamPacketDescription *aPackets)
{
  LOGD("got %u bytes, %u packets\n", aNumBytes, aNumPackets);

  // MAX_AUDIO_FRAMES frames (1 frame per packet) * num channels * 32-bit float
  uint32_t decodedSize = MAX_AUDIO_FRAMES * mAudioChannels *
                         sizeof(AudioDataValue);

  // descriptions for _decompressed_ audio packets. ignored.
  nsAutoArrayPtr<AudioStreamPacketDescription>
    packets(new AudioStreamPacketDescription[MAX_AUDIO_FRAMES]);

  // This API insists on having MP3 packets spoon-fed to it from a callback.
  // This structure exists only to pass our state and the result of the parser
  // on to the callback above.
  PassthroughUserData userData = { this, aNumPackets, aNumBytes, aData, aPackets, false };

  do {
    // Decompressed audio buffer
    nsAutoArrayPtr<uint8_t> decoded(new uint8_t[decodedSize]);

    AudioBufferList decBuffer;
    decBuffer.mNumberBuffers = 1;
    decBuffer.mBuffers[0].mNumberChannels = mAudioChannels;
    decBuffer.mBuffers[0].mDataByteSize = decodedSize;
    decBuffer.mBuffers[0].mData = decoded.get();

    // in: the max number of packets we can handle from the decoder.
    // out: the number of packets the decoder is actually returning.
    UInt32 numFrames = MAX_AUDIO_FRAMES;

    OSStatus rv = AudioConverterFillComplexBuffer(mAudioConverter,
                                                  PassthroughInputDataCallback,
                                                  &userData,
                                                  &numFrames /* in/out */,
                                                  &decBuffer,
                                                  packets.get());

    if (rv && rv != kNeedMoreData) {
      LOGE("Error decoding audio stream: %x\n", rv);
      break;
    }

    // If we decoded zero frames then AudioConverterFillComplexBuffer is out
    // of data to provide.  We drained its internal buffer completely on the
    // last pass.
    if (numFrames == 0 && rv == kNeedMoreData) {
      LOGD("FillComplexBuffer out of data exactly\n");
      break;
    }

    int64_t time = FramesToUsecs(mCurrentAudioFrame, mAudioSampleRate).value();
    int64_t duration = FramesToUsecs(numFrames, mAudioSampleRate).value();

    LOGD("pushed audio at time %lfs; duration %lfs\n",
         (double)time / USECS_PER_S, (double)duration / USECS_PER_S);

    AudioData *audio = new AudioData(mDecoder->GetResource()->Tell(),
                                     time, duration, numFrames,
                                     reinterpret_cast<AudioDataValue *>(decoded.forget()),
                                     mAudioChannels, mAudioSampleRate);
    mAudioQueue.Push(audio);

    mCurrentAudioFrame += numFrames;

    if (rv == kNeedMoreData) {
      // No error; we just need more data.
      LOGD("FillComplexBuffer out of data\n");
      break;
    }
  } while (true);
}
Example #30
OSStatus FCoreAudioSoundSource::CoreAudioRenderCallback( void *InRefCon, AudioUnitRenderActionFlags *IOActionFlags,
														const AudioTimeStamp *InTimeStamp, UInt32 InBusNumber,
														UInt32 InNumberFrames, AudioBufferList *IOData )
{
	OSStatus Status = noErr;
	FCoreAudioSoundSource *Source = ( FCoreAudioSoundSource *)InRefCon;

	uint32 DataByteSize = InNumberFrames * sizeof( Float32 );
	uint32 PacketsRequested = InNumberFrames;
	uint32 PacketsObtained = 0;

	// AudioBufferList declares room for a single AudioBuffer, but with
	// deinterleaved output AudioConverterFillComplexBuffer needs one buffer per
	// channel, so reserve stack space for up to six buffers
	struct
	{
		AudioBufferList BufferList;
		AudioBuffer		AdditionalBuffers[5];
	} LocalBuffers;

	AudioBufferList *LocalBufferList = &LocalBuffers.BufferList;
	LocalBufferList->mNumberBuffers = IOData->mNumberBuffers;

	if( Source->Buffer && Source->Playing )
	{
		while( PacketsObtained < PacketsRequested )
		{
			int32 BufferFilledBytes = PacketsObtained * sizeof( Float32 );
			for( uint32 Index = 0; Index < LocalBufferList->mNumberBuffers; Index++ )
			{
				LocalBufferList->mBuffers[Index].mDataByteSize = DataByteSize - BufferFilledBytes;
				LocalBufferList->mBuffers[Index].mData = ( uint8 *)IOData->mBuffers[Index].mData + BufferFilledBytes;
			}

			uint32 PacketCount = PacketsRequested - PacketsObtained;
			Status = AudioConverterFillComplexBuffer( Source->CoreAudioConverter, &CoreAudioConvertCallback, InRefCon, &PacketCount, LocalBufferList, NULL );
			PacketsObtained += PacketCount;

			if( PacketCount == 0 || Status != noErr )
			{
				AudioConverterReset( Source->CoreAudioConverter );
				break;
			}
		}

		if( PacketsObtained == 0 )
		{
			*IOActionFlags |= kAudioUnitRenderAction_OutputIsSilence;
		}
	}
	else
	{
		*IOActionFlags |= kAudioUnitRenderAction_OutputIsSilence;
	}

	if( PacketsObtained < PacketsRequested )
	{
		// Fill the rest of buffers provided with zeroes
		int32 BufferFilledBytes = PacketsObtained * sizeof( Float32 );
		for( uint32 Index = 0; Index < IOData->mNumberBuffers; ++Index )
		{
			FMemory::Memzero( ( uint8 *)IOData->mBuffers[Index].mData + BufferFilledBytes, DataByteSize - BufferFilledBytes );
		}
	}
	
	return Status;
}
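CoreAudioConvertCallback is also not shown in this excerpt. A sketch of an input proc in the same spirit, feeding raw interleaved Float32 PCM from the source buffer; the PCMData, PCMDataSize and ReadCursor fields are invented names, not the engine's actual buffer layout:

static OSStatus CoreAudioConvertCallback( AudioConverterRef Converter, UInt32 *IONumberDataPackets,
										  AudioBufferList *IOData, AudioStreamPacketDescription **OutPacketDescription,
										  void *InRefCon )
{
	FCoreAudioSoundSource *Source = ( FCoreAudioSoundSource * )InRefCon;

	// Invented buffer fields; the converter deinterleaves this single source
	// buffer into the multiple output buffers of the render proc above.
	uint32 BytesPerPacket = sizeof( Float32 );
	uint32 BytesAvailable = Source->Buffer->PCMDataSize - Source->Buffer->ReadCursor;
	uint32 PacketsAvailable = BytesAvailable / BytesPerPacket;

	if( *IONumberDataPackets > PacketsAvailable )
	{
		// A short (or zero) return makes FillComplexBuffer return early,
		// which drives the chunking/reset logic in the render proc.
		*IONumberDataPackets = PacketsAvailable;
	}

	IOData->mBuffers[0].mData = Source->Buffer->PCMData + Source->Buffer->ReadCursor;
	IOData->mBuffers[0].mDataByteSize = *IONumberDataPackets * BytesPerPacket;
	Source->Buffer->ReadCursor += IOData->mBuffers[0].mDataByteSize;

	return noErr;
}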