// Reads up to numberOfFrames frames of decoded audio into sampleBuffer.
// Returns the number of frames actually read (less than requested on EOF
// or on a decoder error).
SINT SoundSourceCoreAudio::readSampleFrames(
        SINT numberOfFrames, CSAMPLE* sampleBuffer) {
    SINT numFramesRead = 0;

    while (numFramesRead < numberOfFrames) {
        const SINT numFramesToRead = numberOfFrames - numFramesRead;

        // Wrap the unfilled portion of the caller's buffer in a
        // single-buffer AudioBufferList for ExtAudioFileRead().
        AudioBufferList fillBufList;
        fillBufList.mNumberBuffers = 1;
        fillBufList.mBuffers[0].mNumberChannels = getChannelCount();
        fillBufList.mBuffers[0].mDataByteSize = frames2samples(numFramesToRead)
                * sizeof(sampleBuffer[0]);
        fillBufList.mBuffers[0].mData = sampleBuffer
                + frames2samples(numFramesRead);

        // In/out parameter: frames requested on input, frames read on output.
        UInt32 numFramesToReadInOut = numFramesToRead;
        const OSStatus err = ExtAudioFileRead(m_audioFile, &numFramesToReadInOut,
                &fillBufList);
        if (err != noErr) {
            // BUG FIX: the error code was previously assigned and ignored.
            // Stop and return the frames decoded so far instead of looping
            // on a broken stream.
            break;
        }
        if (0 == numFramesToReadInOut) {
            // EOF
            break; // done
        }
        numFramesRead += numFramesToReadInOut;
    }
    return numFramesRead;
}
/* Decode one block of audio from the source into read_to, one float row per
 * channel; *read receives the number of frames actually decoded (0 on error).
 * Frames past a short (end-of-file) read are zero-filled. */
void aubio_source_apple_audio_do_multi(aubio_source_apple_audio_t *s, fmat_t * read_to, uint_t * read) {
  UInt32 chan, frame;
  UInt32 loadedPackets = s->block_size;
  smpl_t **out = read_to->data;
  short *in;
  OSStatus err;

  /* request block_size frames; loadedPackets is updated with the count read */
  err = ExtAudioFileRead(s->audioFile, &loadedPackets, &s->bufferList);
  if (err) {
    AUBIO_ERROR("source_apple_audio: error in ExtAudioFileRead, %d\n", (int)err);
    *read = 0;
    return;
  }

  in = (short *)s->bufferList.mBuffers[0].mData;

  /* de-interleave 16-bit integer samples into per-channel float rows */
  for (frame = 0; frame < loadedPackets; frame++) {
    for (chan = 0; chan < s->channels; chan++) {
      out[chan][frame] = SHORT_TO_FLOAT(in[frame * s->channels + chan]);
    }
  }

  /* short read: pad the remainder of the block with silence */
  for (frame = loadedPackets; frame < s->block_size; frame++) {
    for (chan = 0; chan < s->channels; chan++) {
      out[chan][frame] = 0.;
    }
  }

  *read = (uint_t)loadedPackets;
}
unsigned int SoundSourceCoreAudio::read(unsigned long size, const SAMPLE *destination) {
    //if (!m_decoder) return 0;
    OSStatus err;
    SAMPLE *destBuffer(const_cast<SAMPLE*>(destination));
    UInt32 numFrames = 0;//(size / 2); /// m_inputFormat.mBytesPerFrame);
    unsigned int totalFramesToRead = size/2;
    unsigned int numFramesRead = 0;
    unsigned int numFramesToRead = totalFramesToRead;

    while (numFramesRead < totalFramesToRead) { //FIXME: Hardcoded 2
    	numFramesToRead = totalFramesToRead - numFramesRead;

		AudioBufferList fillBufList;
		fillBufList.mNumberBuffers = 1; //Decode a single track?
		fillBufList.mBuffers[0].mNumberChannels = m_inputFormat.mChannelsPerFrame;
		fillBufList.mBuffers[0].mDataByteSize = math_min(1024, numFramesToRead*4);//numFramesToRead*sizeof(*destBuffer); // 2 = num bytes per SAMPLE
		fillBufList.mBuffers[0].mData = (void*)(&destBuffer[numFramesRead*2]);

			// client format is always linear PCM - so here we determine how many frames of lpcm
			// we can read/write given our buffer size
		numFrames = numFramesToRead; //This silly variable acts as both a parameter and return value.
		err = ExtAudioFileRead (m_audioFile, &numFrames, &fillBufList);
		//The actual number of frames read also comes back in numFrames.
		//(It's both a parameter to a function and a return value. wat apple?)
		//XThrowIfError (err, "ExtAudioFileRead");
		if (!numFrames) {
				// this is our termination condition
			break;
		}
		numFramesRead += numFrames;
    }
    return numFramesRead*2;
}
// Decodes up to numFrames frames, interleaving the planar decoder output
// into `buffer` (channel-interleaved floats). Returns the number of frames
// decoded, or 0 on error/end-of-file (mEOF is set in that case).
Int64 ExtAudioFileAudioSource::decodeData(float* buffer, UInt32 numFrames)
{
	RPRINT("decoding data\n");
    UInt32 numChannels = getNumChannels();
    OSStatus err = noErr;

    // Scratch space: one plane of numFrames floats per channel.
    mReadBuffer.resize(numChannels * numFrames);

    // Point mpBufferList's buffers into mReadBuffer.
    setUpBuffers(&mReadBuffer[0], numChannels, numFrames);

    // Read the data out of our file, filling our output buffer.
    // framesRead is in/out: requested on input, actually read on output.
    UInt32 framesRead = numFrames;
	if(!isLoadedInMemory())
	{
		// File-backed decoding must be serialized with the decode lock;
		// the in-memory path needs no lock.
		RScopedLock l(&mDecodeLock);
		err = ExtAudioFileRead (mAudioFile, &framesRead, mpBufferList);
	}
	else
	{
		err = ExtAudioFileRead (mAudioFile, &framesRead, mpBufferList);
	}

    if(err || framesRead == 0)
    {
        mEOF = true;
		RPRINT("done decoding data\n");
        return 0;
    }

    // Interleave the planar data into the caller's buffer.
    for(UInt32 j = 0; j < numChannels; ++j)
    {
        float *pTemp = &mReadBuffer[j * numFrames];
        float *pOut = &buffer[j];
        // BUG FIX: this loop previously started at i = j, which skipped
        // iterations and dropped the last j frames of every channel after
        // channel 0. Each channel must copy all framesRead samples.
        for(UInt32 i = 0; i < framesRead; i++)
        {
            *pOut = *pTemp++;
            pOut += numChannels;
        }
    }

	RPRINT("done decoding data\n");
    return framesRead;
}
    //==============================================================================
    /** Reads numSamples frames starting at startSampleInFile into the
        destination channel arrays (one int* per channel, offset by
        startOffsetInDestBuffer). Destination channels beyond the file's
        channel count are zero-filled. Returns false if seeking or decoding
        fails.
    */
    bool readSamples (int** destSamples, int numDestChannels, int startOffsetInDestBuffer,
                      int64 startSampleInFile, int numSamples) override
    {
        // Zero any part of the request that extends past the end of the file
        // (this may also clamp numSamples).
        clearSamplesBeyondAvailableLength (destSamples, numDestChannels, startOffsetInDestBuffer,
                                           startSampleInFile, numSamples, lengthInSamples);

        if (numSamples <= 0)
            return true;

        // Seek only when the request is not sequential with the last read.
        if (lastReadPosition != startSampleInFile)
        {
            OSStatus status = ExtAudioFileSeek (audioFileRef, startSampleInFile);
            if (status != noErr)
                return false;

            lastReadPosition = startSampleInFile;
        }

        while (numSamples > 0)
        {
            // Decode in chunks of at most 8192 frames per call.
            const int numThisTime = jmin (8192, numSamples);
            const size_t numBytes = sizeof (float) * (size_t) numThisTime;

            // Scratch block laid out as numChannels consecutive planar
            // float buffers of numThisTime frames each.
            audioDataBlock.ensureSize (numBytes * numChannels, false);
            float* data = static_cast<float*> (audioDataBlock.getData());

            for (int j = (int) numChannels; --j >= 0;)
            {
                bufferList->mBuffers[j].mNumberChannels = 1;
                bufferList->mBuffers[j].mDataByteSize = (UInt32) numBytes;
                bufferList->mBuffers[j].mData = data;
                data += numThisTime;
            }

            // In/out parameter: frames requested in, frames decoded out.
            UInt32 numFramesToRead = (UInt32) numThisTime;
            OSStatus status = ExtAudioFileRead (audioFileRef, &numFramesToRead, bufferList);
            if (status != noErr)
                return false;

            // Copy decoded channels out; zero-fill extra destination
            // channels the file does not provide.
            // NOTE(review): a short read (numFramesToRead < numThisTime)
            // still memcpys numBytes, so the tail could contain stale
            // scratch data - presumably tolerated because EOF was clipped
            // above; confirm.
            for (int i = numDestChannels; --i >= 0;)
            {
                if (destSamples[i] != nullptr)
                {
                    if (i < (int) numChannels)
                        memcpy (destSamples[i] + startOffsetInDestBuffer, bufferList->mBuffers[i].mData, numBytes);
                    else
                        zeromem (destSamples[i] + startOffsetInDestBuffer, numBytes);
                }
            }

            startOffsetInDestBuffer += numThisTime;
            numSamples -= numThisTime;
            lastReadPosition += numThisTime;
        }

        return true;
    }
Exemple #6
0
// Reads up to nsamples frames into the caller-supplied buffer and returns
// the number of frames actually decoded.
size_t ExtAFSource::readSamples(void *buffer, size_t nsamples)
{
    // Wrap the caller's memory in a one-buffer AudioBufferList.
    AudioBufferList abl = { 0 };
    abl.mNumberBuffers = 1;
    abl.mBuffers[0].mNumberChannels = m_asbd.mChannelsPerFrame;
    abl.mBuffers[0].mData = buffer;

    // ns is an in/out parameter: frames requested in, frames read out.
    UInt32 ns = nsamples;
    abl.mBuffers[0].mDataByteSize = ns * m_asbd.mBytesPerFrame;

    CHECKCA(ExtAudioFileRead(m_eaf, &ns, &abl));
    return ns;
}
// Decodes the whole file to 16-bit native-endian PCM in `data`, reporting
// the matching OpenAL format and sample rate through `format`/`frequency`.
// Throws Exception for files with more than two channels; check_os_err
// raises on any Core Audio failure.
void ExtAudioFile::convert(Bytes& data, ALenum& format, ALsizei& frequency) {
    OSStatus err;

    // Read in the original file format.
    AudioStreamBasicDescription in_format;
    UInt32 in_format_size = sizeof(AudioStreamBasicDescription);
    err = ExtAudioFileGetProperty(_id, kExtAudioFileProperty_FileDataFormat, &in_format_size,
                                  &in_format);
    check_os_err(err, "ExtAudioFileGetProperty");

    // Map the channel count to an OpenAL 16-bit format (mono/stereo only).
    frequency = in_format.mSampleRate;
    if (in_format.mChannelsPerFrame == 1) {
        format = AL_FORMAT_MONO16;
    } else if (in_format.mChannelsPerFrame == 2) {
        format = AL_FORMAT_STEREO16;
    } else {
        throw Exception("audio file has more than two channels");
    }

    // Convert to 16-bit native-endian linear PCM.  Preserve the frequency and channel count
    // of the original format.
    AudioStreamBasicDescription out_format = in_format;
    out_format.mFormatID            = kAudioFormatLinearPCM;
    out_format.mBytesPerPacket      = 2 * out_format.mChannelsPerFrame;
    out_format.mFramesPerPacket     = 1;
    out_format.mBytesPerFrame       = 2 * out_format.mChannelsPerFrame;
    out_format.mBitsPerChannel      = 16;
    out_format.mFormatFlags         = kAudioFormatFlagsNativeEndian
                                      | kAudioFormatFlagIsPacked
                                      | kAudioFormatFlagIsSignedInteger;
    err = ExtAudioFileSetProperty(_id, kExtAudioFileProperty_ClientDataFormat,
                                  sizeof(AudioStreamBasicDescription), &out_format);
    check_os_err(err, "ExtAudioFileSetProperty");

    // Get the number of frames.
    SInt64 frame_count;
    UInt32 frame_count_size = sizeof(int64_t);
    err = ExtAudioFileGetProperty(_id, kExtAudioFileProperty_FileLengthFrames, &frame_count_size,
                                  &frame_count);
    check_os_err(err, "ExtAudioFileGetProperty");

    // Read the converted frames into memory.
    // BUG FIX: a single ExtAudioFileRead() call may legally deliver fewer
    // frames than requested, which previously left the tail of `data`
    // unfilled. Loop until the buffer is full or the decoder reports EOF.
    data.resize(frame_count * out_format.mBytesPerFrame);
    SInt64 frames_done = 0;
    while (frames_done < frame_count) {
        UInt32 frames_this_pass = (UInt32)(frame_count - frames_done);
        AudioBufferList data_buffer;
        data_buffer.mNumberBuffers = 1;
        data_buffer.mBuffers[0].mDataByteSize =
                (UInt32)(data.size() - frames_done * out_format.mBytesPerFrame);
        data_buffer.mBuffers[0].mNumberChannels = out_format.mChannelsPerFrame;
        data_buffer.mBuffers[0].mData =
                data.data() + frames_done * out_format.mBytesPerFrame;
        err = ExtAudioFileRead(_id, &frames_this_pass, &data_buffer);
        check_os_err(err, "ExtAudioFileRead");
        if (frames_this_pass == 0) {
            break;  // end of file reached early
        }
        frames_done += frames_this_pass;
    }
}
// Decodes up to frameCount frames into bufferList; returns the number of
// frames actually read, or 0 if the underlying read failed.
UInt32 SFB::Audio::CoreAudioDecoder::_ReadAudio(AudioBufferList *bufferList, UInt32 frameCount)
{
	// frameCount doubles as the request (in) and the actual count (out).
	OSStatus result = ExtAudioFileRead(mExtAudioFile, &frameCount, bufferList);
	if(result != noErr) {
		LOGGER_ERR("org.sbooth.AudioEngine.Decoder.CoreAudio", "ExtAudioFileRead failed: " << result);
		return 0;
	}

	// Some M4A files need the position tracked manually.
	if(mUseM4AWorkarounds)
		mCurrentFrame += frameCount;

	return frameCount;
}
// Pulls converted audio from mySettings->inputFile via ExtAudioFileRead and
// writes the packets to mySettings->outputFile until end of file.
void Convert(MyAudioConverterSettings *mySettings)
{
	UInt32 outputBufferSize = 32 * 1024; // 32 KB is a good starting point
	UInt32 sizePerPacket = mySettings->outputFormat.mBytesPerPacket;
	UInt32 packetsPerBuffer = outputBufferSize / sizePerPacket;

	// allocate destination buffer
	UInt8 *outputBuffer = (UInt8 *)malloc(sizeof(UInt8) * outputBufferSize);

	UInt32 outputFilePacketPosition = 0; //in bytes
	while(1)
	{
		// wrap the destination buffer in an AudioBufferList
		AudioBufferList convertedData;
		convertedData.mNumberBuffers = 1;
		convertedData.mBuffers[0].mNumberChannels = mySettings->outputFormat.mChannelsPerFrame;
		convertedData.mBuffers[0].mDataByteSize = outputBufferSize;
		convertedData.mBuffers[0].mData = outputBuffer;

		// in/out parameter: packets requested in, packets read out
		UInt32 frameCount = packetsPerBuffer;

		// read from the extaudiofile
		CheckResult(ExtAudioFileRead(mySettings->inputFile,
									 &frameCount,
									 &convertedData),
					"Couldn't read from input file");

		if (frameCount == 0) {
			printf ("done reading from file");
			// BUG FIX: this used to `return`, leaking outputBuffer (the
			// cleanup after the loop was unreachable).
			break;
		}

		// write the converted data to the output file
		CheckResult (AudioFileWritePackets(mySettings->outputFile,
										   FALSE,
										   frameCount,
										   NULL,
										   outputFilePacketPosition / mySettings->outputFormat.mBytesPerPacket, 
										   &frameCount,
										   convertedData.mBuffers[0].mData),
					 "Couldn't write packets to file");

		// advance the output file write location
		outputFilePacketPosition += (frameCount * mySettings->outputFormat.mBytesPerPacket);
	}

	free(outputBuffer);
	// AudioConverterDispose(audioConverter);
}
int AudioDecoderCoreAudio::read(int size, const SAMPLE *destination) {
    OSStatus err;
    SAMPLE *destBuffer(const_cast<SAMPLE*>(destination));
    unsigned int samplesWritten = 0;
    unsigned int i = 0;
    UInt32 numFrames = 0;
    unsigned int totalFramesToRead = size/2;
    unsigned int numFramesRead = 0;
    unsigned int numFramesToRead = totalFramesToRead;

    while (numFramesRead < totalFramesToRead) { 
    	numFramesToRead = totalFramesToRead - numFramesRead;
    	
		AudioBufferList fillBufList;
		fillBufList.mNumberBuffers = 1; //Decode a single track
        //See CoreAudioTypes.h for definitins of these variables:
		fillBufList.mBuffers[0].mNumberChannels = m_clientFormat.NumberChannels();
		fillBufList.mBuffers[0].mDataByteSize = numFramesToRead*2 * sizeof(SAMPLE);
		fillBufList.mBuffers[0].mData = (void*)(&destBuffer[numFramesRead*2]);
			
        // client format is always linear PCM - so here we determine how many frames of lpcm
        // we can read/write given our buffer size
		numFrames = numFramesToRead; //This silly variable acts as both a parameter and return value.
		err = ExtAudioFileRead (m_audioFile, &numFrames, &fillBufList);
		//The actual number of frames read also comes back in numFrames.
		//(It's both a parameter to a function and a return value. wat apple?)
		//XThrowIfError (err, "ExtAudioFileRead");	
        /*
        if (err != noErr)
        {
            std::cerr << "Error reading samples from file" << std::endl;
            return 0;
        }*/

		if (!numFrames) {
				// this is our termination condition
			break;
		}
		numFramesRead += numFrames;
    }
    
    m_iPositionInSamples += numFramesRead*m_iChannels;

    return numFramesRead*m_iChannels;
}
Exemple #11
0
/* Audio unit render callback: decodes inNumberFrames interleaved stereo
 * frames from audio->file and de-interleaves them into the two output
 * buffers in ioData. Sets audio->end on error or end of file. */
static OSStatus renderCallback(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags, const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList *ioData)
{
   UInt32 i;
   Audio *audio = (Audio*) inRefCon;
   OSStatus err;
   float *outL, *outR, *wave;
   UInt32 requestedFrames = inNumberFrames;

   /* (re)allocate the interleaved stereo scratch buffer if too small */
   if(audio->buff.mBuffers[0].mDataByteSize < inNumberFrames * 2 * sizeof(float)) {
      if(audio->buff.mBuffers[0].mData != NULL)
         free(audio->buff.mBuffers[0].mData);
      audio->buff.mNumberBuffers = 1;
      audio->buff.mBuffers[0].mNumberChannels = 2;
      audio->buff.mBuffers[0].mData = malloc(inNumberFrames * 2 * sizeof(float) );
      audio->buff.mBuffers[0].mDataByteSize = inNumberFrames * 2 * sizeof(float);
   }

   /* read; inNumberFrames is updated with the frames actually decoded */
   err = ExtAudioFileRead(audio->file, &inNumberFrames, &audio->buff);
   if (err) {
      audio->end = true;
      return err;
   }

   /* end of file */
   if ( inNumberFrames == 0 ) {
      audio->end = true;
      return noErr;
   }

   /* de-interleave the decoded frames into the left/right outputs */
   outL = (float*) ioData->mBuffers[0].mData;
   outR = (float*) ioData->mBuffers[1].mData;
   wave = (float*) audio->buff.mBuffers[0].mData;
   for (i = 0; i < inNumberFrames; i++) {
      *outL++ = *wave++;
      *outR++ = *wave++;
   }

   /* BUG FIX: on a short read near EOF the tail of the output buffers was
    * left with stale data; fill the remainder with silence instead. */
   for (i = inNumberFrames; i < requestedFrames; i++) {
      *outL++ = 0.0f;
      *outR++ = 0.0f;
   }

   return noErr;
}
// Fills one OpenAL buffer with mono 16-bit samples decoded from the
// player's ExtAudioFile, then hands the data to AL via alBufferData.
// NOTE(review): per the "4/18/11 - fix" comments, the sample buffer is
// deliberately over-allocated (sizeof(UInt16) * bufferSizeBytes) and the
// read loop advances mData by framesReadIntoBuffer * sizeof(UInt16) BYTES
// on a UInt16* (i.e. twice the mono-frame size) - this only stays in
// bounds because of the oversized allocation; confirm before tightening.
void fillALBuffer (MyStreamPlayer* player, ALuint alBuffer) {
	AudioBufferList *bufferList;
	// One mono channel -> an AudioBufferList holding a single AudioBuffer.
	UInt32 ablSize = offsetof(AudioBufferList, mBuffers[0]) + (sizeof(AudioBuffer) * 1); // 1 channel
	bufferList = malloc (ablSize);
	
	// allocate sample buffer
	UInt16 *sampleBuffer = malloc(sizeof(UInt16) * player->bufferSizeBytes); // 4/18/11 - fix 2
	
	bufferList->mNumberBuffers = 1;
	bufferList->mBuffers[0].mNumberChannels = 1;
	bufferList->mBuffers[0].mDataByteSize = player->bufferSizeBytes;
	bufferList->mBuffers[0].mData = sampleBuffer;
	printf ("allocated %d byte buffer for ABL\n", 
			player->bufferSizeBytes);
	
	// read from ExtAudioFile into sampleBuffer
	// TODO: handle end-of-file wraparound
	UInt32 framesReadIntoBuffer = 0;
	do {
		// framesRead is in/out: frames requested, then frames actually read.
		UInt32 framesRead = player->fileLengthFrames - framesReadIntoBuffer;
		bufferList->mBuffers[0].mData = sampleBuffer + (framesReadIntoBuffer * (sizeof(UInt16))); // 4/18/11 - fix 3
		CheckError(ExtAudioFileRead(player->extAudioFile, 
									&framesRead,
									bufferList),
				   "ExtAudioFileRead failed");
		framesReadIntoBuffer += framesRead;
		player->totalFramesRead += framesRead;
		printf ("read %d frames\n", framesRead);
	} while (framesReadIntoBuffer < (player->bufferSizeBytes / sizeof(UInt16))); // 4/18/11 - fix 4
	
	// copy from sampleBuffer to AL buffer
	alBufferData(alBuffer,
				 AL_FORMAT_MONO16,
				 sampleBuffer,
				 player->bufferSizeBytes,
				 player->dataFormat.mSampleRate);
	
	// The ABL and staging buffer are no longer needed once AL has copied.
	free (bufferList);
	free (sampleBuffer);
}
// Decodes up to `count` frames of interleaved floats into `frames`.
// Returns the number of frames actually read; throws InvalidFileFormat if
// the decoder reports an error.
size_t
CoreAudioReadStream::getFrames(size_t count, float *frames)
{
    // Nothing to do for an unopened stream or an empty request.
    if (!m_channelCount || count == 0) {
        return 0;
    }

    // Point the preallocated buffer list at the caller's buffer.
    m_d->buffer.mBuffers[0].mData = frames;
    m_d->buffer.mBuffers[0].mDataByteSize =
        sizeof(float) * m_channelCount * count;

    // In/out parameter: frames requested in, frames decoded out.
    UInt32 framesRead = count;
    m_d->err = ExtAudioFileRead(m_d->file, &framesRead, &m_d->buffer);
    if (m_d->err) {
        m_error = "CoreAudioReadStream: Error in decoder: code " + codestr(m_d->err);
        throw InvalidFileFormat(m_path, "error in decoder");
    }

    return framesRead;
}
Exemple #14
0
// Decodes audio into the caller's data buffer and shrinks it to the bytes
// actually produced. Returns true on success (always false off macOS).
bool PlayBuffer::read(core::data &da)
{
# ifdef NNT_MACH

    // Wrap the caller's byte buffer in a single-buffer AudioBufferList.
    AudioBufferList abl;
    abl.mNumberBuffers = 1;
    abl.mBuffers[0].mNumberChannels = d_ptr->ofmt.mChannelsPerFrame;
    abl.mBuffers[0].mData = da.bytes();
    abl.mBuffers[0].mDataByteSize = da.length();

    // frames is in/out: capacity in frames on input, frames read on output.
    UInt32 frames = da.length() / d_ptr->ofmt.mBytesPerFrame;
    OSStatus sta = ExtAudioFileRead(d_ptr->extstm, &frames, &abl);

    // Trim the buffer down to what was really decoded.
    da.set_length(frames * d_ptr->ofmt.mBytesPerFrame);

    return sta == 0;

# endif

    return false;
}
// Decodes the file at inFileURL to 16-bit native-endian PCM in a malloc'd
// buffer (caller frees). Reports size, OpenAL format, and sample rate via
// the out parameters. Returns NULL on any failure.
void *GetOpenALAudioData(CFURLRef inFileURL, ALsizei *outDataSize, ALenum *outDataFormat, ALsizei *outSampleRate)
{
	OSStatus err = noErr;

	ExtAudioFileRef extRef = NULL;
	err = ExtAudioFileOpenURL(inFileURL, &extRef);
	if (err != noErr)
	{
		// BUG FIX: OSStatus is 32-bit; cast to long to match %ld.
		printf("GetOpenALAudioData: ExtAudioFileOpenURL FAILED, Error = %ld\n", (long)err);
		return NULL;
	}

	AudioStreamBasicDescription theFileFormat;
	UInt32 thePropertySize = sizeof(theFileFormat);
	err = ExtAudioFileGetProperty(extRef, kExtAudioFileProperty_FileDataFormat, &thePropertySize, &theFileFormat);
	if (err != noErr)
	{
		printf("GetOpenALAudioData: ExtAudioFileGetProperty(kExtAudioFileProperty_FileDataFormat) FAILED, Error = %ld\n", (long)err);
		ExtAudioFileDispose(extRef);
		return NULL;
	}

	if (theFileFormat.mChannelsPerFrame > 2)
	{
		printf("GetOpenALAudioData: Unsupported format, channel count is greater than stereo\n");
		ExtAudioFileDispose(extRef);
		return NULL;
	}

	// Set the output format to 16 bit signed integer (native-endian) data
	// Maintain the channel count and sample rate of the original source format
	AudioStreamBasicDescription theOutputFormat;
	theOutputFormat.mSampleRate = theFileFormat.mSampleRate;
	theOutputFormat.mChannelsPerFrame = theFileFormat.mChannelsPerFrame;
	theOutputFormat.mFormatID = kAudioFormatLinearPCM;
	theOutputFormat.mBytesPerPacket = 2 * theOutputFormat.mChannelsPerFrame;
	theOutputFormat.mFramesPerPacket = 1;
	theOutputFormat.mBytesPerFrame = 2 * theOutputFormat.mChannelsPerFrame;
	theOutputFormat.mBitsPerChannel = 16;
	theOutputFormat.mFormatFlags = kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked | kAudioFormatFlagIsSignedInteger;

	err = ExtAudioFileSetProperty(extRef, kExtAudioFileProperty_ClientDataFormat, sizeof(theOutputFormat), &theOutputFormat);
	if (err != noErr)
	{
		printf("GetOpenALAudioData: ExtAudioFileSetProperty(kExtAudioFileProperty_ClientDataFormat) FAILED, Error = %ld\n", (long)err);
		ExtAudioFileDispose(extRef);
		return NULL;
	}

	SInt64 theFileLengthInFrames = 0;
	thePropertySize = sizeof(theFileLengthInFrames);
	err = ExtAudioFileGetProperty(extRef, kExtAudioFileProperty_FileLengthFrames, &thePropertySize, &theFileLengthInFrames);
	if (err != noErr)
	{
		printf("GetOpenALAudioData: ExtAudioFileGetProperty(kExtAudioFileProperty_FileLengthFrames) FAILED, Error = %ld\n", (long)err);
		ExtAudioFileDispose(extRef);
		return NULL;
	}

	UInt32 dataSize = theFileLengthInFrames * theOutputFormat.mBytesPerFrame;
	void *theData = malloc(dataSize);
	if (theData == NULL)
	{
		printf("GetOpenALAudioData: malloc FAILED\n");
		ExtAudioFileDispose(extRef);
		return NULL;
	}

	AudioBufferList theDataBuffer;
	theDataBuffer.mNumberBuffers = 1;
	theDataBuffer.mBuffers[0].mDataByteSize = dataSize;
	theDataBuffer.mBuffers[0].mNumberChannels = theOutputFormat.mChannelsPerFrame;
	theDataBuffer.mBuffers[0].mData = theData;

	// BUG FIX: the old code passed (UInt32 *)&theFileLengthInFrames, reading
	// a SInt64 through a UInt32 pointer - undefined behavior that also takes
	// the wrong 4 bytes on big-endian hosts. Use a real UInt32 instead.
	UInt32 framesToRead = (UInt32)theFileLengthInFrames;
	err = ExtAudioFileRead(extRef, &framesToRead, &theDataBuffer);
	if (err != noErr)
	{ 
		printf("GetOpenALAudioData: ExtAudioFileRead FAILED, Error = %ld\n", (long)err);
		free(theData);
		ExtAudioFileDispose(extRef);
		return NULL;
	}

	*outDataSize = (ALsizei)dataSize;
	*outDataFormat = (theOutputFormat.mChannelsPerFrame > 1) ? AL_FORMAT_STEREO16 : AL_FORMAT_MONO16;
	*outSampleRate = (ALsizei)theOutputFormat.mSampleRate;

	ExtAudioFileDispose(extRef);
	return theData;
}
Exemple #16
0
// Converts the audio file at inputFileURL to outputFileURL in the given
// file type and format, optionally at a fixed encode bit rate. The actual
// format conversion happens inside ExtAudioFileWrite. XThrowIfError throws
// on any Core Audio failure (NOTE(review): if it throws mid-function the
// two ExtAudioFileRefs are not disposed - presumably acceptable for this
// sample tool; confirm). Returns 0 on success.
int ConvertFile (CFURLRef					inputFileURL,
                 CAStreamBasicDescription	&inputFormat,
                 CFURLRef					outputFileURL,
                 AudioFileTypeID				outputFileType,
                 CAStreamBasicDescription	&outputFormat,
                 UInt32                      outputBitRate)
{
	ExtAudioFileRef infile, outfile;
    
    // first open the input file
	OSStatus err = ExtAudioFileOpenURL (inputFileURL, &infile);
	XThrowIfError (err, "ExtAudioFileOpen");
	
	// if outputBitRate is specified, this can change the sample rate of the output file
	// so we let this "take care of itself"
	if (outputBitRate)
		outputFormat.mSampleRate = 0.;
    
	// create the output file (this will erase an exsiting file)
	err = ExtAudioFileCreateWithURL (outputFileURL, outputFileType, &outputFormat, NULL, kAudioFileFlags_EraseFile, &outfile);
	XThrowIfError (err, "ExtAudioFileCreateNew");
	
	// get and set the client format - it should be lpcm
	// (both files share the same client format so the read side decodes and
	// the write side encodes through it)
	CAStreamBasicDescription clientFormat = (inputFormat.mFormatID == kAudioFormatLinearPCM ? inputFormat : outputFormat);
	UInt32 size = sizeof(clientFormat);
	err = ExtAudioFileSetProperty(infile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat);
	XThrowIfError (err, "ExtAudioFileSetProperty inFile, kExtAudioFileProperty_ClientDataFormat");
	
	size = sizeof(clientFormat);
	err = ExtAudioFileSetProperty(outfile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat);
	XThrowIfError (err, "ExtAudioFileSetProperty outFile, kExtAudioFileProperty_ClientDataFormat");
	
	if( outputBitRate > 0 ) {
		printf ("Dest bit rate: %d\n", (int)outputBitRate);
		// reach through to the underlying converter to set the bit rate
		AudioConverterRef outConverter;
		size = sizeof(outConverter);
		err = ExtAudioFileGetProperty(outfile, kExtAudioFileProperty_AudioConverter, &size, &outConverter);
		XThrowIfError (err, "ExtAudioFileGetProperty outFile, kExtAudioFileProperty_AudioConverter");
		
		err = AudioConverterSetProperty(outConverter, kAudioConverterEncodeBitRate,
										sizeof(outputBitRate), &outputBitRate);
		XThrowIfError (err, "AudioConverterSetProperty, kAudioConverterEncodeBitRate");
		
		// we have changed the converter, so we should do this in case
		// setting a converter property changes the converter used by ExtAF in some manner
		CFArrayRef config = NULL;
		err = ExtAudioFileSetProperty(outfile, kExtAudioFileProperty_ConverterConfig, sizeof(config), &config);
		XThrowIfError (err, "ExtAudioFileSetProperty outFile, kExtAudioFileProperty_ConverterConfig");
	}
	
	// set up buffers (kSrcBufSize is defined elsewhere in this file/project)
	char srcBuffer[kSrcBufSize];
    
	// do the read and write - the conversion is done on and by the write call
	while (1)
	{
		AudioBufferList fillBufList;
		fillBufList.mNumberBuffers = 1;
		fillBufList.mBuffers[0].mNumberChannels = inputFormat.mChannelsPerFrame;
		fillBufList.mBuffers[0].mDataByteSize = kSrcBufSize;
		fillBufList.mBuffers[0].mData = srcBuffer;
        
		// client format is always linear PCM - so here we determine how many frames of lpcm
		// we can read/write given our buffer size
		UInt32 numFrames = (kSrcBufSize / clientFormat.mBytesPerFrame);
		
		// printf("test %d\n", numFrames);
        
		// numFrames is in/out: frames requested, then frames actually read
		err = ExtAudioFileRead (infile, &numFrames, &fillBufList);
		XThrowIfError (err, "ExtAudioFileRead");
		if (!numFrames) {
			// this is our termination condition
			break;
		}
		
		err = ExtAudioFileWrite(outfile, numFrames, &fillBufList);
		XThrowIfError (err, "ExtAudioFileWrite");
	}
    
    // close
	ExtAudioFileDispose(outfile);
	ExtAudioFileDispose(infile);
	
    return 0;
}
Exemple #17
0
// Decodes the file at inFileURL to 16-bit native-endian PCM in a malloc'd
// buffer (caller frees). Reports size, OpenAL format, and sample rate via
// the out parameters. Returns NULL on any failure.
void* MyGetOpenALAudioData(CFURLRef inFileURL, ALsizei *outDataSize, ALenum *outDataFormat, ALsizei*	outSampleRate)
{
	OSStatus						err = noErr;	
	SInt64							theFileLengthInFrames = 0;
	AudioStreamBasicDescription		theFileFormat;
	UInt32							thePropertySize = sizeof(theFileFormat);
	ExtAudioFileRef					extRef = NULL;
	void*							theData = NULL;
	AudioStreamBasicDescription		theOutputFormat;

	// Open a file with ExtAudioFileOpen()
	// BUG FIX (all printfs below): OSStatus is 32-bit; cast to long for %ld.
	err = ExtAudioFileOpenURL(inFileURL, &extRef);
	if(err) { printf("MyGetOpenALAudioData: ExtAudioFileOpenURL FAILED, Error = %ld\n", (long)err); goto Exit; }
	
	// Get the audio data format
	err = ExtAudioFileGetProperty(extRef, kExtAudioFileProperty_FileDataFormat, &thePropertySize, &theFileFormat);
	if(err) { printf("MyGetOpenALAudioData: ExtAudioFileGetProperty(kExtAudioFileProperty_FileDataFormat) FAILED, Error = %ld\n", (long)err); goto Exit; }
	if (theFileFormat.mChannelsPerFrame > 2)  { printf("MyGetOpenALAudioData - Unsupported Format, channel count is greater than stereo\n"); goto Exit;}

	// Set the client format to 16 bit signed integer (native-endian) data
	// Maintain the channel count and sample rate of the original source format
	theOutputFormat.mSampleRate = theFileFormat.mSampleRate;
	theOutputFormat.mChannelsPerFrame = theFileFormat.mChannelsPerFrame;

	theOutputFormat.mFormatID = kAudioFormatLinearPCM;
	theOutputFormat.mBytesPerPacket = 2 * theOutputFormat.mChannelsPerFrame;
	theOutputFormat.mFramesPerPacket = 1;
	theOutputFormat.mBytesPerFrame = 2 * theOutputFormat.mChannelsPerFrame;
	theOutputFormat.mBitsPerChannel = 16;
	theOutputFormat.mFormatFlags = kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked | kAudioFormatFlagIsSignedInteger;
	
	// Set the desired client (output) data format
	err = ExtAudioFileSetProperty(extRef, kExtAudioFileProperty_ClientDataFormat, sizeof(theOutputFormat), &theOutputFormat);
	if(err) { printf("MyGetOpenALAudioData: ExtAudioFileSetProperty(kExtAudioFileProperty_ClientDataFormat) FAILED, Error = %ld\n", (long)err); goto Exit; }
	
	// Get the total frame count
	thePropertySize = sizeof(theFileLengthInFrames);
	err = ExtAudioFileGetProperty(extRef, kExtAudioFileProperty_FileLengthFrames, &thePropertySize, &theFileLengthInFrames);
	if(err) { printf("MyGetOpenALAudioData: ExtAudioFileGetProperty(kExtAudioFileProperty_FileLengthFrames) FAILED, Error = %ld\n", (long)err); goto Exit; }
	
	// Read all the data into memory
	UInt32		dataSize = theFileLengthInFrames * theOutputFormat.mBytesPerFrame;
	theData = malloc(dataSize);
	if (theData)
	{
		AudioBufferList		theDataBuffer;
		theDataBuffer.mNumberBuffers = 1;
		theDataBuffer.mBuffers[0].mDataByteSize = dataSize;
		theDataBuffer.mBuffers[0].mNumberChannels = theOutputFormat.mChannelsPerFrame;
		theDataBuffer.mBuffers[0].mData = theData;
		
		// Read the data into an AudioBufferList
		// BUG FIX: the old code passed (UInt32*)&theFileLengthInFrames,
		// reading a SInt64 through a UInt32 pointer - undefined behavior
		// that also picks the wrong 4 bytes on big-endian hosts.
		UInt32 framesToRead = (UInt32)theFileLengthInFrames;
		err = ExtAudioFileRead(extRef, &framesToRead, &theDataBuffer);
		if(err == noErr)
		{
			// success
			*outDataSize = (ALsizei)dataSize;
			*outDataFormat = (theOutputFormat.mChannelsPerFrame > 1) ? AL_FORMAT_STEREO16 : AL_FORMAT_MONO16;
			*outSampleRate = (ALsizei)theOutputFormat.mSampleRate;
		}
		else 
		{ 
			// failure
			free (theData);
			theData = NULL; // make sure to return NULL
			printf("MyGetOpenALAudioData: ExtAudioFileRead FAILED, Error = %ld\n", (long)err); goto Exit;
		}	
	}
	
Exit:
	// Dispose the ExtAudioFileRef, it is no longer needed
	if (extRef) ExtAudioFileDispose(extRef);
	return theData;
}
// Loads the entire loop file at LOOP_PATH into player->sampleBuffer as
// mono 16-bit PCM at 44.1 kHz. Returns noErr on success (CheckError
// aborts on Core Audio failures).
OSStatus loadLoopIntoBuffer(MyLoopPlayer* player) {
	CFURLRef loopFileURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, 
														 LOOP_PATH,
														 kCFURLPOSIXPathStyle,
														 false);
	
	// describe the client format - AL needs mono
	memset(&player->dataFormat, 0, sizeof(player->dataFormat));
	player->dataFormat.mFormatID = kAudioFormatLinearPCM;
	player->dataFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
	player->dataFormat.mSampleRate = 44100.0;
	player->dataFormat.mChannelsPerFrame = 1;
	player->dataFormat.mFramesPerPacket = 1;
	player->dataFormat.mBitsPerChannel = 16;
	player->dataFormat.mBytesPerFrame = 2;
	player->dataFormat.mBytesPerPacket = 2;
	
	ExtAudioFileRef extAudioFile;
	CheckError (ExtAudioFileOpenURL(loopFileURL, &extAudioFile),
				"Couldn't open ExtAudioFile for reading");
	
	// tell extAudioFile about our format
	CheckError(ExtAudioFileSetProperty(extAudioFile,
									   kExtAudioFileProperty_ClientDataFormat,
									   sizeof (AudioStreamBasicDescription),
									   &player->dataFormat),
			   "Couldn't set client format on ExtAudioFile");
	
	// figure out how big a buffer we need
	SInt64 fileLengthFrames;
	UInt32 propSize = sizeof (fileLengthFrames);
	ExtAudioFileGetProperty(extAudioFile,
							kExtAudioFileProperty_FileLengthFrames,
							&propSize,
							&fileLengthFrames);
	
	printf ("plan on reading %lld frames\n", fileLengthFrames);
	player->bufferSizeBytes = fileLengthFrames * player->dataFormat.mBytesPerFrame;
	
	// mono -> an AudioBufferList with one AudioBuffer
	AudioBufferList *buffers;
	UInt32 ablSize = offsetof(AudioBufferList, mBuffers[0]) + (sizeof(AudioBuffer) * 1); // 1 channel
	buffers = malloc (ablSize);
	
	// allocate sample buffer
	player->sampleBuffer =  malloc(sizeof(UInt16) * player->bufferSizeBytes); // 4/18/11 - fix 1
	
	buffers->mNumberBuffers = 1;
	buffers->mBuffers[0].mNumberChannels = 1;
	buffers->mBuffers[0].mDataByteSize = player->bufferSizeBytes;
	buffers->mBuffers[0].mData = player->sampleBuffer;
	
	printf ("created AudioBufferList\n");
	
	// loop reading into the ABL until buffer is full
	UInt32 totalFramesRead = 0;
	do {
		// framesRead is in/out: frames requested, then frames actually read
		UInt32 framesRead = fileLengthFrames - totalFramesRead;
		buffers->mBuffers[0].mData = player->sampleBuffer + (totalFramesRead * (sizeof(UInt16)));
		CheckError(ExtAudioFileRead(extAudioFile, 
									&framesRead,
									buffers),
				   "ExtAudioFileRead failed");
		if (framesRead == 0) {
			// BUG FIX: if the decoder hits EOF before delivering
			// fileLengthFrames frames, the old loop spun forever.
			break;
		}
		totalFramesRead += framesRead;
		printf ("read %d frames\n", framesRead);
	} while (totalFramesRead < fileLengthFrames);
	
	// can free the ABL; still have samples in sampleBuffer
	free(buffers);
	return noErr;
}
/* Decode one buffer's worth of audio from the Core Audio decoder attached to
 * this sample (internal->decoder_private) into internal->buffer.
 *
 * Returns the number of BYTES actually written (0 on error).
 * Flag side effects on sample->flags:
 *   - SOUND_SAMPLEFLAG_EOF    when ExtAudioFileRead returns 0 frames,
 *   - SOUND_SAMPLEFLAG_ERROR  when ExtAudioFileRead returns an error,
 *   - SOUND_SAMPLEFLAG_EAGAIN when the buffer was not completely filled
 *                             and EOF was not reached.
 */
static Uint32 CoreAudio_read(Sound_Sample *sample)
{
	OSStatus error_result = noErr;	
	/* Documentation/example shows SInt64, but is problematic for big endian
	 * on 32-bit cast for ExtAudioFileRead() because it takes the upper
	 * bits which turn to 0.
	 */
	UInt32 buffer_size_in_frames = 0;
	UInt32 buffer_size_in_frames_remaining = 0;
	UInt32 total_frames_read = 0;
	UInt32 data_buffer_size = 0;
	UInt32 bytes_remaining = 0;
	size_t total_bytes_read = 0;
	Sound_SampleInternal *internal = (Sound_SampleInternal *) sample->opaque;
	CoreAudioFileContainer* core_audio_file_container = (CoreAudioFileContainer *) internal->decoder_private;
	UInt32 max_buffer_size = internal->buffer_size;

//	printf("internal->buffer_size=%d, internal->buffer=0x%x, sample->buffer_size=%d\n", internal->buffer_size, internal->buffer, sample->buffer_size); 
//	printf("internal->max_buffer_size=%d\n", max_buffer_size); 

	/* Compute how many frames will fit into our max buffer size */
	/* Warning: If this is not evenly divisible, the buffer will not be completely filled which violates the SDL_sound assumption. */
	buffer_size_in_frames = max_buffer_size / core_audio_file_container->outputFormat->mBytesPerFrame;
//	printf("buffer_size_in_frames=%ld, internal->buffer_size=%d, internal->buffer=0x%x outputFormat->mBytesPerFrame=%d, sample->buffer_size=%d\n", buffer_size_in_frames, internal->buffer_size, internal->buffer, core_audio_file_container->outputFormat->mBytesPerFrame, sample->buffer_size); 


//	void* temp_buffer = malloc(max_buffer_size);
	
	/* One interleaved AudioBuffer aimed directly at the caller-visible
	 * decode buffer; mData/mDataByteSize are re-pointed each loop pass. */
	AudioBufferList audio_buffer_list;
	audio_buffer_list.mNumberBuffers = 1;
	audio_buffer_list.mBuffers[0].mDataByteSize = max_buffer_size;
	audio_buffer_list.mBuffers[0].mNumberChannels = core_audio_file_container->outputFormat->mChannelsPerFrame;
	audio_buffer_list.mBuffers[0].mData = internal->buffer;


	bytes_remaining = max_buffer_size;
	buffer_size_in_frames_remaining = buffer_size_in_frames;
	
	// oops. Due to the kAudioFormatFlagIsPacked bug, 
	// I was misled to believe that Core Audio
	// was not always filling my entire requested buffer. 
	// So this while-loop might be unnecessary.
	// However, I have not exhaustively tested all formats, 
	// so maybe it is possible this loop is useful.
	// It might also handle the not-evenly disvisible case above.
	while(buffer_size_in_frames_remaining > 0 && !(sample->flags & SOUND_SAMPLEFLAG_EOF))
	{
		
		/* NOTE(review): this data_buffer_size value is overwritten below
		 * before it is ever read — it appears to be a dead store. */
		data_buffer_size = (UInt32)(buffer_size_in_frames * core_audio_file_container->outputFormat->mBytesPerFrame);
//		printf("data_buffer_size=%d\n", data_buffer_size); 

		buffer_size_in_frames = buffer_size_in_frames_remaining;
		
//		printf("reading buffer_size_in_frames=%"PRId64"\n", buffer_size_in_frames); 


		/* Aim the buffer at the portion not yet filled by previous passes. */
		audio_buffer_list.mBuffers[0].mDataByteSize = bytes_remaining;
		audio_buffer_list.mBuffers[0].mData = &(((UInt8*)internal->buffer)[total_bytes_read]);

		
		/* Read the data into an AudioBufferList */
		error_result = ExtAudioFileRead(core_audio_file_container->extAudioFileRef, &buffer_size_in_frames, &audio_buffer_list);
		if(error_result == noErr)
		{
		
		
			/* Success */
			
			/* buffer_size_in_frames is in/out: it now holds the frame count
			 * actually read on this pass. */
			total_frames_read += buffer_size_in_frames;
			buffer_size_in_frames_remaining = buffer_size_in_frames_remaining - buffer_size_in_frames;
			
//			printf("read buffer_size_in_frames=%"PRId64", buffer_size_in_frames_remaining=%"PRId64"\n", buffer_size_in_frames, buffer_size_in_frames_remaining); 

			/* ExtAudioFileRead returns the number of frames actually read. Need to convert back to bytes. */
			data_buffer_size = (UInt32)(buffer_size_in_frames * core_audio_file_container->outputFormat->mBytesPerFrame);
//			printf("data_buffer_size=%d\n", data_buffer_size); 

			total_bytes_read += data_buffer_size;
			bytes_remaining = bytes_remaining - data_buffer_size;

			/* Note: 0 == buffer_size_in_frames is a legitimate value meaning we are EOF. */
			if(0 == buffer_size_in_frames)
			{
				sample->flags |= SOUND_SAMPLEFLAG_EOF;			
			}

		}
		else 
		{
			SNDDBG(("Core Audio: ExtAudioFileReadfailed, reason: [%s].\n", CoreAudio_FourCCToString(error_result)));

			sample->flags |= SOUND_SAMPLEFLAG_ERROR;
			break;
			
		}
	}
	
	/* Short read without EOF: signal the caller to try again. */
	if( (!(sample->flags & SOUND_SAMPLEFLAG_EOF)) && (total_bytes_read < max_buffer_size))
	{
		SNDDBG(("Core Audio: ExtAudioFileReadfailed SOUND_SAMPLEFLAG_EAGAIN, reason: [total_bytes_read < max_buffer_size], %d, %d.\n", total_bytes_read , max_buffer_size));
		
		/* Don't know what to do here. */
		sample->flags |= SOUND_SAMPLEFLAG_EAGAIN;
	}
	return total_bytes_read;
} /* CoreAudio_read */
// Converts the audio file at sourceURL into a CAF file at destinationURL
// using the requested output format and sample rate (0 == keep source rate).
// Runs on a background worker thread; cooperates with audio-session
// interruption handling and resumes where the underlying codec allows it.
// Returns noErr on success, a Core Audio OSStatus on failure, or
// kMyAudioConverterErr_CannotResumeFromInterruptionError when interrupted
// and the codec cannot resume.
OSStatus DoConvertFile(CFURLRef sourceURL, CFURLRef destinationURL, OSType outputFormat, Float64 outputSampleRate) 
{
    ExtAudioFileRef sourceFile = 0;
    ExtAudioFileRef destinationFile = 0;
    Boolean         canResumeFromInterruption = true; // we can continue unless told otherwise
    OSStatus        error = noErr;
    
    // in this sample we should never be on the main thread here
    assert(![NSThread isMainThread]);
    
    // transition thread state to kStateRunning before continuing
    ThreadStateSetRunning();
    
    printf("DoConvertFile\n");
    
    try {
        CAStreamBasicDescription srcFormat, dstFormat;

        // open the source file
        XThrowIfError(ExtAudioFileOpenURL(sourceURL, &sourceFile), "ExtAudioFileOpenURL failed");
			
        // get the source data format
        UInt32 size = sizeof(srcFormat);
        XThrowIfError(ExtAudioFileGetProperty(sourceFile, kExtAudioFileProperty_FileDataFormat, &size, &srcFormat), "couldn't get source data format");
		
        printf("\nSource file format: "); srcFormat.Print();

        // setup the output file format
        dstFormat.mSampleRate = (outputSampleRate == 0 ? srcFormat.mSampleRate : outputSampleRate); // set sample rate
        if (outputFormat == kAudioFormatLinearPCM) {
            // if PCM was selected as the destination format, create a 16-bit int PCM file format description
            dstFormat.mFormatID = outputFormat;
            dstFormat.mChannelsPerFrame = srcFormat.NumberChannels();
            dstFormat.mBitsPerChannel = 16;
            dstFormat.mBytesPerPacket = dstFormat.mBytesPerFrame = 2 * dstFormat.mChannelsPerFrame;
            dstFormat.mFramesPerPacket = 1;
            dstFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger; // little-endian
        } else {
            // compressed format - need to set at least format, sample rate and channel fields for kAudioFormatProperty_FormatInfo
            dstFormat.mFormatID = outputFormat;
            dstFormat.mChannelsPerFrame =  (outputFormat == kAudioFormatiLBC ? 1 : srcFormat.NumberChannels()); // for iLBC num channels must be 1
            
            // use AudioFormat API to fill out the rest of the description
            size = sizeof(dstFormat);
            XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &size, &dstFormat), "couldn't create destination data format");
        }
        
        printf("\nDestination file format: "); dstFormat.Print();
        
        // create the destination file 
        XThrowIfError(ExtAudioFileCreateWithURL(destinationURL, kAudioFileCAFType, &dstFormat, NULL, kAudioFileFlags_EraseFile, &destinationFile), "ExtAudioFileCreateWithURL failed!");

        // set the client format - The format must be linear PCM (kAudioFormatLinearPCM)
        // You must set this in order to encode or decode a non-PCM file data format
        // You may set this on PCM files to specify the data format used in your calls to read/write
        CAStreamBasicDescription clientFormat;
        if (outputFormat == kAudioFormatLinearPCM) {
            clientFormat = dstFormat;
        } else {
            clientFormat.SetCanonical(srcFormat.NumberChannels(), true);
            clientFormat.mSampleRate = srcFormat.mSampleRate;
        }
        
        printf("\nClient data format: "); clientFormat.Print();
        printf("\n");
        
        size = sizeof(clientFormat);
        XThrowIfError(ExtAudioFileSetProperty(sourceFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat), "couldn't set source client format");
        
        size = sizeof(clientFormat);
        XThrowIfError(ExtAudioFileSetProperty(destinationFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat), "couldn't set destination client format");

        // can the audio converter (which in this case is owned by an ExtAudioFile object) resume conversion after an interruption?
        AudioConverterRef audioConverter;
                    
        size = sizeof(audioConverter);
        XThrowIfError(ExtAudioFileGetProperty(destinationFile, kExtAudioFileProperty_AudioConverter, &size, &audioConverter), "Couldn't get Audio Converter!");
        
        // this property may be queried at any time after construction of the audio converter (which in this case is owned by an ExtAudioFile object)
        // after setting the output format -- there's no clear reason to prefer construction time, interruption time, or potential resumption time but we prefer
        // construction time since it means less code to execute during or after interruption time
        UInt32 canResume = 0;
        size = sizeof(canResume);
        error = AudioConverterGetProperty(audioConverter, kAudioConverterPropertyCanResumeFromInterruption, &size, &canResume);
        if (noErr == error) {
            // we received a valid return value from the GetProperty call
            // if the property's value is 1, then the codec CAN resume work following an interruption
            // if the property's value is 0, then interruptions destroy the codec's state and we're done
            
            if (0 == canResume) canResumeFromInterruption = false;
            
            printf("Audio Converter %s continue after interruption!\n", (canResumeFromInterruption == 0 ? "CANNOT" : "CAN"));
        } else {
            // if the property is unimplemented (kAudioConverterErr_PropertyNotSupported, or paramErr returned in the case of PCM),
            // then the codec being used is not a hardware codec so we're not concerned about codec state
            // we are always going to be able to resume conversion after an interruption
            
            if (kAudioConverterErr_PropertyNotSupported == error) {
                printf("kAudioConverterPropertyCanResumeFromInterruption property not supported!\n");
            } else {
                // FIX: OSStatus is 32-bit; cast to long to match the %ld specifier
                printf("AudioConverterGetProperty kAudioConverterPropertyCanResumeFromInterruption result %ld\n", (long)error);
            }
            
            error = noErr;
        }
        
        // set up buffers
        // FIX: const so srcBuffer is a true fixed-size array, not a (non-standard) VLA
        const UInt32 bufferByteSize = 32768;
        char srcBuffer[bufferByteSize];
        
        // keep track of the source file offset so we know where to reset the source for
        // reading if interrupted and input was not consumed by the audio converter
        SInt64 sourceFrameOffset = 0;
        
        //***** do the read and write - the conversion is done on and by the write call *****//
        printf("Converting...\n");
        while (1) {
        
            AudioBufferList fillBufList;
            fillBufList.mNumberBuffers = 1;
            fillBufList.mBuffers[0].mNumberChannels = clientFormat.NumberChannels();
            fillBufList.mBuffers[0].mDataByteSize = bufferByteSize;
            fillBufList.mBuffers[0].mData = srcBuffer;
                
            // client format is always linear PCM - so here we determine how many frames of lpcm
            // we can read/write given our buffer size
            // FIX: initialize — previously indeterminate if the guard below did not fire
            UInt32 numFrames = 0;
            if (clientFormat.mBytesPerFrame > 0) // rids bogus analyzer div by zero warning mBytesPerFrame can't be 0 and is protected by an Assert
                numFrames = clientFormat.BytesToFrames(bufferByteSize); // (bufferByteSize / clientFormat.mBytesPerFrame);

            XThrowIfError(ExtAudioFileRead(sourceFile, &numFrames, &fillBufList), "ExtAudioFileRead failed!");	
            if (!numFrames) {
                // this is our termination condition
                error = noErr;
                break;
            }
            sourceFrameOffset += numFrames;
            
            // this will block if we're interrupted
            Boolean wasInterrupted = ThreadStatePausedCheck();
            
            if ((error || wasInterrupted) && (false == canResumeFromInterruption)) {
                // this is our interruption termination condition
                // an interruption has occured but the audio converter cannot continue
                error = kMyAudioConverterErr_CannotResumeFromInterruptionError;
                break;
            }

            error = ExtAudioFileWrite(destinationFile, numFrames, &fillBufList);
            // if interrupted in the process of the write call, we must handle the errors appropriately
            if (error) {
                if (kExtAudioFileError_CodecUnavailableInputConsumed == error) {
                
                    // FIX: cast to long for %ld (OSStatus is SInt32)
                    printf("ExtAudioFileWrite kExtAudioFileError_CodecUnavailableInputConsumed error %ld\n", (long)error);
                    
                    /*
                        Returned when ExtAudioFileWrite was interrupted. You must stop calling
                        ExtAudioFileWrite. If the underlying audio converter can resume after an
                        interruption (see kAudioConverterPropertyCanResumeFromInterruption), you must
                        wait for an EndInterruption notification from AudioSession, then activate the session
                        before resuming. In this situation, the buffer you provided to ExtAudioFileWrite was successfully
                        consumed and you may proceed to the next buffer
                    */
                    
                } else if (kExtAudioFileError_CodecUnavailableInputNotConsumed == error) {
                
                    // FIX: cast to long for %ld (OSStatus is SInt32)
                    printf("ExtAudioFileWrite kExtAudioFileError_CodecUnavailableInputNotConsumed error %ld\n", (long)error);
                    
                    /*
                        Returned when ExtAudioFileWrite was interrupted. You must stop calling
                        ExtAudioFileWrite. If the underlying audio converter can resume after an
                        interruption (see kAudioConverterPropertyCanResumeFromInterruption), you must
                        wait for an EndInterruption notification from AudioSession, then activate the session
                        before resuming. In this situation, the buffer you provided to ExtAudioFileWrite was not
                        successfully consumed and you must try to write it again
                    */
                    
                    // seek back to last offset before last read so we can try again after the interruption
                    sourceFrameOffset -= numFrames;
                    XThrowIfError(ExtAudioFileSeek(sourceFile, sourceFrameOffset), "ExtAudioFileSeek failed!");
                    
                } else {
                    XThrowIfError(error, "ExtAudioFileWrite error!");
                }
            } // if
        } // while
    }
    catch (const CAXException &e) { // FIX: catch by const reference, not by value
        char buf[256];
        fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
        error = e.mError;
    }
    
    // close
    if (destinationFile) ExtAudioFileDispose(destinationFile);
    if (sourceFile) ExtAudioFileDispose(sourceFile);

    // transition thread state to kStateDone before continuing
    ThreadStateSetDone();
    
    return error;
}
Exemple #21
0
// Loads an entire audio file and converts it to deinterleaved float PCM in
// the manager's output format (m_OutputFormat).
// Returns a new core_audio_sound with ref_count == 1 (the caller owns that
// reference), or NULL on any failure. All intermediate resources (the
// ExtAudioFile handle, the buffer list and the sample storage) are released
// on every error path.
core_audio_sound* CCoreAudioSoundManager::LoadSoundFromFile(const CStdString& fileName)
{
  FSRef fileRef;
  UInt32 size = 0;
  ExtAudioFileRef audioFile;
  OSStatus ret = FSPathMakeRef((const UInt8*) fileName.c_str(), &fileRef, false);
  if (ret) // BUG FIX: a FSPathMakeRef failure used to be silently ignored
  {
    CLog::Log(LOGERROR, "CCoreAudioSoundManager::LoadSoundFromFile: Unable to resolve path (%s). Error = 0x%08x (%4.4s)", fileName.c_str(), ret, CONVERT_OSSTATUS(ret));
    return NULL;
  }
  ret = ExtAudioFileOpen(&fileRef, &audioFile);
  if (ret)
  {
    CLog::Log(LOGERROR, "CCoreAudioSoundManager::LoadSoundFromFile: Unable to open source file (%s). Error = 0x%08x (%4.4s)", fileName.c_str(), ret, CONVERT_OSSTATUS(ret));
    return NULL;
  }    
  
  core_audio_sound* pSound = new core_audio_sound;
  
  // Retrieve the format of the source file
  AudioStreamBasicDescription inputFormat;
  size = sizeof(inputFormat);
  ret = ExtAudioFileGetProperty(audioFile, kExtAudioFileProperty_FileDataFormat, &size, &inputFormat);
  if (ret)
  {
    CLog::Log(LOGERROR, "CCoreAudioSoundManager::LoadSoundFromFile: Unable to fetch source file format. Error = 0x%08x (%4.4s)", ret, CONVERT_OSSTATUS(ret));
    ExtAudioFileDispose(audioFile); // BUG FIX: the file handle leaked on this path
    delete pSound;
    return NULL;
  }  
  
  // Set up format conversion. This is the format that will be produced by Read/Write calls. 
  // Here we use the same format provided to the output AudioUnit
  ret = ExtAudioFileSetProperty(audioFile, kExtAudioFileProperty_ClientDataFormat, sizeof(AudioStreamBasicDescription), &m_OutputFormat);
  if (ret)
  {
    CLog::Log(LOGERROR, "CCoreAudioSoundManager::LoadSoundFromFile: Unable to set conversion format. Error = 0x%08x (%4.4s)", ret, CONVERT_OSSTATUS(ret));
    ExtAudioFileDispose(audioFile); // BUG FIX: the file handle leaked on this path
    delete pSound;
    return NULL;
  }    
  
  // Retrieve the file size (in terms of the file's sample-rate, not the output sample-rate)
  UInt64 totalFrames;
  size = sizeof(totalFrames);
  ret = ExtAudioFileGetProperty(audioFile, kExtAudioFileProperty_FileLengthFrames, &size, &totalFrames);
  if (ret)
  {
    CLog::Log(LOGERROR, "CCoreAudioSoundManager::LoadSoundFromFile: Unable to fetch source file size. Error = 0x%08x (%4.4s)", ret, CONVERT_OSSTATUS(ret));
    ExtAudioFileDispose(audioFile); // BUG FIX: the file handle leaked on this path
    delete pSound;
    return NULL;
  }  
  
  // Calculate the total number of converted frames to be read
  totalFrames *= (float)m_OutputFormat.mSampleRate / (float)inputFormat.mSampleRate; // TODO: Verify the accuracy of this
  
  // Allocate one AudioBuffer per channel (deinterleaved pcm), all backed by a
  // single contiguous float allocation.
  UInt32 channelCount = m_OutputFormat.mChannelsPerFrame;
  pSound->buffer_list = (AudioBufferList*)calloc(1, sizeof(AudioBufferList) + sizeof(AudioBuffer) * (channelCount - kVariableLengthArray));
  pSound->buffer_list->mNumberBuffers = channelCount; // One buffer per channel for deinterlaced pcm
  float* buffers = (float*)calloc(1, sizeof(float) * totalFrames * channelCount);
  for(UInt32 i = 0; i < channelCount; i++) // BUG FIX: was a signed index vs. an unsigned bound
  {
    pSound->buffer_list->mBuffers[i].mNumberChannels = 1; // One channel per buffer for deinterlaced pcm
    pSound->buffer_list->mBuffers[i].mData = buffers + (totalFrames * i);
    pSound->buffer_list->mBuffers[i].mDataByteSize = totalFrames * sizeof(float);
  }
  
  // Read the entire file
  // TODO: Should we limit the total file length?
  UInt32 readFrames = totalFrames;
  ret = ExtAudioFileRead(audioFile, &readFrames, pSound->buffer_list);
  if (ret)
  {
    CLog::Log(LOGERROR, "CCoreAudioSoundManager::LoadSoundFromFile: Unable to read from file (%s). Error = 0x%08x (%4.4s)", fileName.c_str(), ret, CONVERT_OSSTATUS(ret));
    ExtAudioFileDispose(audioFile);  // BUG FIX: the file handle leaked on this path
    free(buffers);                   // BUG FIX: the sample storage leaked on this path
    free(pSound->buffer_list);       // BUG FIX: the buffer list leaked on this path
    delete pSound;
    return NULL;
  }  
  pSound->total_frames = readFrames; // Store the actual number of frames read from the file. Rounding errors in calcuating the converted number of frames can truncate the read.
  
  // TODO: What do we do with files with more than 2 channels. Currently we just copy the first two and dump the rest.
  // BUG FIX: only duplicate Left into Right when the output actually has a
  // second buffer — a mono output format would make mBuffers[1] out of bounds.
  if (inputFormat.mChannelsPerFrame == 1 && channelCount > 1) // Copy Left channel into Right if the source file is Mono
    memcpy(pSound->buffer_list->mBuffers[1].mData, pSound->buffer_list->mBuffers[0].mData, pSound->buffer_list->mBuffers[0].mDataByteSize);
  
  ret = ExtAudioFileDispose(audioFile); // Close the file. We have what we need. Not a lot to be done on failure.
  if (ret)
    CLog::Log(LOGERROR, "CCoreAudioSoundManager::LoadSoundFromFile: Unable to close file (%s). Error = 0x%08x (%4.4s)", fileName.c_str(), ret, CONVERT_OSSTATUS(ret));

  pSound->ref_count = 1; // The caller holds a reference to this object now
  pSound->play_count = 0;
  
  return pSound;  
}
// Reads up to 'sampleframes' deinterleaved sample frames into 'buffers'
// (one pointer per channel, buffers.size() must equal the channel count),
// converting from the decoder's float output to the requested bit format.
// Returns the number of frames actually read (0 on error, EOF, or bad args).
uint32 nuiAudioDecoder::ReadDE(std::vector<void*> buffers, uint32 sampleframes, nuiSampleBitFormat format)
{
  if (!mInitialized)
    return 0;
  
  SetPosition((uint32)mPosition);
  
  // Clamp the request to the frames remaining in the stream.
  uint32 length = mInfo.GetSampleFrames();
  if (mPosition >= length)
    return 0;
  sampleframes = MIN(sampleframes, length - mPosition);
  
  uint32 channels = mInfo.GetChannels();
  if (buffers.size() != channels)
    return 0;
  
  // Core Audio hands us floats. For float output we decode straight into the
  // caller's buffers; for int16 output we decode into scratch buffers and
  // convert afterwards.
  std::vector<float*> temp(channels);
  for (uint32 c = 0; c < channels; c++)
  {
    if (format == eSampleFloat32)
      temp[c] = (float*)(buffers[c]);
    else
      temp[c] = new float[sampleframes];
  }
  
  // Build a variable-length AudioBufferList with one buffer per channel
  // (non-interleaved client format).
  uint64 BytesToRead = SampleFramesToBytes(sampleframes);
  uint32 listSize = sizeof(AudioBufferList) + sizeof(AudioBuffer)* (channels-1);
  AudioBufferList* pBufList = reinterpret_cast<AudioBufferList*> (new Byte[listSize]);
  pBufList->mNumberBuffers = channels; // we query non-interleaved samples, so we need as many buffers as channels
  for (uint32 c = 0; c < pBufList->mNumberBuffers; c++)
  {
    // each AudioBuffer represents one channel (non-interleaved samples)
    pBufList->mBuffers[c].mNumberChannels   = 1;
    pBufList->mBuffers[c].mDataByteSize     = BytesToRead / channels;
    pBufList->mBuffers[c].mData             = temp[c];
  }
  
  // 'frames' is in/out: on return it holds the frame count actually read.
  UInt32 frames = sampleframes;
  OSStatus err = ExtAudioFileRead(mpPrivate->mExtAudioFileRef, &frames, pBufList);
  if (err != noErr)
    frames = 0;
  
  if (format == eSampleInt16)
  {
    for (uint32 c = 0; c < channels; c++)
    {
      // BUG FIX: the conversion used to be guarded by `if (err != noErr)`,
      // i.e. it ran only when the read FAILED, leaving the caller's int16
      // buffers unfilled on success. Convert unconditionally — frames == 0
      // on error, which makes the conversion a no-op.
      nuiAudioConvert_FloatBufferTo16bits(temp[c], (int16*)(buffers[c]), frames);
      delete[] temp[c];
    }
  }
  
  // BUG FIX: the list was allocated with new Byte[]; releasing it with a
  // plain `delete pBufList` mismatched new[]/delete (undefined behavior).
  delete[] reinterpret_cast<Byte*>(pBufList);
  
  mPosition += frames;
  return frames;
}
Exemple #23
0
// Decodes the opened audio file into a newly-allocated AudioBus of float PCM.
//
// sampleRate: desired output rate; 0 keeps the file's native rate. The
//             ExtAudioFile's converter resamples when the rates differ.
// mixToMono:  when true and the source is stereo, the two channels are
//             averaged into a single mono channel.
//
// Returns nullptr on any Core Audio error.
PassOwnPtr<AudioBus> AudioFileReader::createBus(float sampleRate, bool mixToMono)
{
    if (!m_extAudioFileRef)
        return nullptr;

    // Get file's data format
    UInt32 size = sizeof(m_fileDataFormat);
    OSStatus result = ExtAudioFileGetProperty(m_extAudioFileRef, kExtAudioFileProperty_FileDataFormat, &size, &m_fileDataFormat);
    if (result != noErr)
        return nullptr;

    // Number of channels
    size_t numberOfChannels = m_fileDataFormat.mChannelsPerFrame;

    // Number of frames
    SInt64 numberOfFrames64 = 0;
    size = sizeof(numberOfFrames64);
    result = ExtAudioFileGetProperty(m_extAudioFileRef, kExtAudioFileProperty_FileLengthFrames, &size, &numberOfFrames64);
    if (result != noErr)
        return nullptr;

    // Sample-rate
    double fileSampleRate = m_fileDataFormat.mSampleRate;

    // Make client format same number of channels as file format, but tweak a few things.
    // Client format will be linear PCM (canonical), and potentially change sample-rate.
    m_clientDataFormat = m_fileDataFormat;

    // Non-interleaved float32 ("canonical") PCM, one AudioBuffer per channel.
    m_clientDataFormat.mFormatID = kAudioFormatLinearPCM;
    m_clientDataFormat.mFormatFlags = kAudioFormatFlagsCanonical;
    m_clientDataFormat.mBitsPerChannel = 8 * sizeof(AudioSampleType);
    m_clientDataFormat.mChannelsPerFrame = numberOfChannels;
    m_clientDataFormat.mFramesPerPacket = 1;
    m_clientDataFormat.mBytesPerPacket = sizeof(AudioSampleType);
    m_clientDataFormat.mBytesPerFrame = sizeof(AudioSampleType);
    m_clientDataFormat.mFormatFlags |= kAudioFormatFlagIsNonInterleaved;

    if (sampleRate)
        m_clientDataFormat.mSampleRate = sampleRate;

    result = ExtAudioFileSetProperty(m_extAudioFileRef, kExtAudioFileProperty_ClientDataFormat, sizeof(AudioStreamBasicDescription), &m_clientDataFormat);
    if (result != noErr)
        return nullptr;

    // Change numberOfFrames64 to destination sample-rate
    numberOfFrames64 = numberOfFrames64 * (m_clientDataFormat.mSampleRate / fileSampleRate);
    size_t numberOfFrames = static_cast<size_t>(numberOfFrames64);

    size_t busChannelCount = mixToMono ? 1 : numberOfChannels;

    // Create AudioBus where we'll put the PCM audio data
    OwnPtr<AudioBus> audioBus = adoptPtr(new AudioBus(busChannelCount, numberOfFrames));
    audioBus->setSampleRate(narrowPrecisionToFloat(m_clientDataFormat.mSampleRate)); // save for later

    // Only allocated in the mixToMono case
    AudioFloatArray bufL;
    AudioFloatArray bufR;
    float* bufferL = 0;
    float* bufferR = 0;
    
    // Setup AudioBufferList in preparation for reading
    AudioBufferList* bufferList = createAudioBufferList(numberOfChannels);

    if (mixToMono && numberOfChannels == 2) {
        // Stereo-to-mono: decode into temporaries, mix after the read.
        bufL.allocate(numberOfFrames);
        bufR.allocate(numberOfFrames);
        bufferL = bufL.data();
        bufferR = bufR.data();

        bufferList->mBuffers[0].mNumberChannels = 1;
        bufferList->mBuffers[0].mDataByteSize = numberOfFrames * sizeof(float);
        bufferList->mBuffers[0].mData = bufferL;

        bufferList->mBuffers[1].mNumberChannels = 1;
        bufferList->mBuffers[1].mDataByteSize = numberOfFrames * sizeof(float);
        bufferList->mBuffers[1].mData = bufferR;
    } else {
        ASSERT(!mixToMono || numberOfChannels == 1);

        // Decode directly into the bus's channel storage.
        // for True-stereo (numberOfChannels == 4)
        for (size_t i = 0; i < numberOfChannels; ++i) {
            bufferList->mBuffers[i].mNumberChannels = 1;
            bufferList->mBuffers[i].mDataByteSize = numberOfFrames * sizeof(float);
            bufferList->mBuffers[i].mData = audioBus->channel(i)->mutableData();
        }
    }

    // Read from the file (or in-memory version)
    // NOTE(review): framesToRead may come back smaller than numberOfFrames
    // (e.g. rounding after rate conversion); the bus length is not trimmed
    // to match — confirm downstream consumers tolerate a padded tail.
    UInt32 framesToRead = numberOfFrames;
    result = ExtAudioFileRead(m_extAudioFileRef, &framesToRead, bufferList);
    if (result != noErr) {
        destroyAudioBufferList(bufferList);
        return nullptr;
    }

    if (mixToMono && numberOfChannels == 2) {
        // Mix stereo down to mono
        float* destL = audioBus->channel(0)->mutableData();
        for (size_t i = 0; i < numberOfFrames; i++)
            destL[i] = 0.5f * (bufferL[i] + bufferR[i]);
    }

    // Cleanup
    destroyAudioBufferList(bufferList);

    return audioBus.release();
}
Exemple #24
0
/*
 * Decodes an entire audio file into a Malloc'd buffer of 16-bit
 * native-endian PCM suitable for OpenAL.
 *
 * On success returns the sample data (caller frees) and fills in
 * *dataSize, *dataFormat and *sampleRate; also records channel count,
 * source bit depth, rate and byte size in p->wav.
 * Returns NULL on any failure.
 */
static void* GetOpenALAudioData( KLAL* p,
	CFURLRef fileURL, ALsizei* dataSize, ALenum* dataFormat, ALsizei *sampleRate)
{
    OSStatus    	err;
    UInt32			size;
	UInt32			bufferSize;
	UInt32			framesToRead = 0;
    void*           data = NULL;
    AudioBufferList dataBuffer;
	
    /* Open the audio file. */
    /* BUG FIX: initialize the handle so the cleanup at Exit does not test an
     * indeterminate value when ExtAudioFileOpenURL fails. */
    ExtAudioFileRef audioFile = NULL;
    err = ExtAudioFileOpenURL(fileURL, &audioFile);
    
	if (err)
	{
		KLLog("[KLAL] GetOpenALAudioData(A):%d\n",(int)err);
        goto Exit;
    }
	
	//CFRelease(fileURL);
	
    /* Fetch the source data format. */
    AudioStreamBasicDescription fileFormat;
    size = sizeof(fileFormat);
    err = ExtAudioFileGetProperty(audioFile, kExtAudioFileProperty_FileDataFormat, &size, &fileFormat);
    if (err)
	{
		KLLog("[KLAL] GetOpenALAudioData(B):%d\n",(int)err);
		goto Exit;
    }
	
    /* Client (output) format: 16-bit signed native-endian interleaved PCM. */
    AudioStreamBasicDescription outputFormat;
    outputFormat.mSampleRate = fileFormat.mSampleRate;
    outputFormat.mChannelsPerFrame = fileFormat.mChannelsPerFrame;
    outputFormat.mFormatID = kAudioFormatLinearPCM;
    outputFormat.mBytesPerPacket = 2 * outputFormat.mChannelsPerFrame;
    outputFormat.mFramesPerPacket = 1;
    outputFormat.mBytesPerFrame = 2 * outputFormat.mChannelsPerFrame;
    outputFormat.mBitsPerChannel = 16;//fileFormat.mBitsPerChannel;
    outputFormat.mFormatFlags = kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked | kAudioFormatFlagIsSignedInteger;
    
	p->wav.channel	= outputFormat.mChannelsPerFrame;
	p->wav.bit		= fileFormat.mBitsPerChannel;
	p->wav.freq		= fileFormat.mSampleRate;
	
	err = ExtAudioFileSetProperty(
								  audioFile, kExtAudioFileProperty_ClientDataFormat, sizeof(outputFormat), &outputFormat);
    if (err)
	{
		KLLog("[KLAL] GetOpenALAudioData(C):%d\n",(int)err);
		goto Exit;
    }
	
    /* Get the file length in frames. */
    SInt64  fileLengthFrames = 0;
    size = sizeof(fileLengthFrames);
    err = ExtAudioFileGetProperty(
								  audioFile, kExtAudioFileProperty_FileLengthFrames, &size, &fileLengthFrames);
    if (err)
	{
		KLLog("[KLAL] GetOpenALAudioData(D):%d\n",(int)err);
		goto Exit;
	}
	
    /* Allocate the destination buffer. */
    bufferSize = (UInt32) fileLengthFrames * (UInt32) outputFormat.mBytesPerFrame;
    data = Malloc(bufferSize);
    dataBuffer.mNumberBuffers = 1;
    dataBuffer.mBuffers[0].mDataByteSize = bufferSize;
    dataBuffer.mBuffers[0].mNumberChannels = outputFormat.mChannelsPerFrame;
    dataBuffer.mBuffers[0].mData = data;
	p->wav.sz = bufferSize; 
	
    /* Read the whole file into the buffer.
     * BUG FIX: use a genuine UInt32 in/out frame count. The old code passed
     * (UInt32*)&fileLengthFrames, which reads/writes the wrong half of the
     * SInt64 on big-endian targets and violates strict aliasing — the exact
     * pitfall ExtAudioFileRead's frame-count parameter is known for. */
    framesToRead = (UInt32) fileLengthFrames;
    err = ExtAudioFileRead(audioFile, &framesToRead, &dataBuffer);
    if (err)
	{
		KLLog("[KLAL] GetOpenALAudioData(E):%d\n",(int)err);
		sfree(data);
		data = NULL; /* BUG FIX: previously returned a dangling pointer after free */
		goto Exit;
    }
	
    /* Fill in the output parameters. */
    *dataSize = (ALsizei)bufferSize;
    *dataFormat = (outputFormat.mChannelsPerFrame > 1) ? AL_FORMAT_STEREO16 : AL_FORMAT_MONO16;
    *sampleRate = (ALsizei)outputFormat.mSampleRate;
	
Exit:
    /* Dispose of the audio file; audioFile is NULL if open never succeeded. */
    if (audioFile)
	{
		ExtAudioFileDispose(audioFile);
    }
		
    return data;
}