//==============================================================================
    bool readSamples (int** destSamples, int numDestChannels, int startOffsetInDestBuffer,
                      int64 startSampleInFile, int numSamples) override
    {
        clearSamplesBeyondAvailableLength (destSamples, numDestChannels, startOffsetInDestBuffer,
                                           startSampleInFile, numSamples, lengthInSamples);

        if (numSamples <= 0)
            return true;

        if (lastReadPosition != startSampleInFile)
        {
            OSStatus status = ExtAudioFileSeek (audioFileRef, startSampleInFile);
            if (status != noErr)
                return false;

            lastReadPosition = startSampleInFile;
        }

        while (numSamples > 0)
        {
            const int numThisTime = jmin (8192, numSamples);
            const size_t numBytes = sizeof (float) * (size_t) numThisTime;

            audioDataBlock.ensureSize (numBytes * numChannels, false);
            float* data = static_cast<float*> (audioDataBlock.getData());

            for (int j = (int) numChannels; --j >= 0;)
            {
                bufferList->mBuffers[j].mNumberChannels = 1;
                bufferList->mBuffers[j].mDataByteSize = (UInt32) numBytes;
                bufferList->mBuffers[j].mData = data;
                data += numThisTime;
            }

            UInt32 numFramesToRead = (UInt32) numThisTime;
            OSStatus status = ExtAudioFileRead (audioFileRef, &numFramesToRead, bufferList);
            if (status != noErr)
                return false;

            for (int i = numDestChannels; --i >= 0;)
            {
                if (destSamples[i] != nullptr)
                {
                    if (i < (int) numChannels)
                        memcpy (destSamples[i] + startOffsetInDestBuffer, bufferList->mBuffers[i].mData, numBytes);
                    else
                        zeromem (destSamples[i] + startOffsetInDestBuffer, numBytes);
                }
            }

            startOffsetInDestBuffer += numThisTime;
            numSamples -= numThisTime;
            lastReadPosition += numThisTime;
        }

        return true;
    }
Example 2
void ExtAudioFileAudioSource::setDecoderPosition(Int64 startFrame)
{
	RScopedLock l(&mDecodeLock);

	RPRINT("setting decoder position\n");
    ExtAudioFileSeek(mAudioFile, startFrame);  
	if(startFrame < getLength() * getSampleRate())
		mEOF = false;
}
Example 3
static int CoreAudio_rewind(Sound_Sample *sample)
{
	OSStatus error_result = noErr;	
	Sound_SampleInternal *internal = (Sound_SampleInternal *) sample->opaque;
	CoreAudioFileContainer* core_audio_file_container = (CoreAudioFileContainer *) internal->decoder_private;
	
	error_result = ExtAudioFileSeek(core_audio_file_container->extAudioFileRef, 0);
	if(error_result != noErr)
	{
		sample->flags |= SOUND_SAMPLEFLAG_ERROR;
	}
	return(1);
} /* CoreAudio_rewind */
Example 4
SInt64 SFB::Audio::CoreAudioDecoder::_SeekToFrame(SInt64 frame)
{
	OSStatus result = ExtAudioFileSeek(mExtAudioFile, frame);
	if(noErr != result) {
		LOGGER_ERR("org.sbooth.AudioEngine.Decoder.CoreAudio", "ExtAudioFileSeek failed: " << result);
		return -1;
	}

	if(mUseM4AWorkarounds)
		mCurrentFrame = frame;

	return _GetCurrentFrame();
}
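_GetCurrentFrame() belongs to the surrounding decoder class and is not shown here. With the plain ExtAudioFile API, the same "seek, then report where we actually landed" idea can be sketched with ExtAudioFileTell (names below are illustrative):

#include <AudioToolbox/AudioToolbox.h>

// Sketch: seek, then ask the file for its actual read position in sample frames.
static SInt64 SeekAndTell(ExtAudioFileRef file, SInt64 frame)
{
    if (ExtAudioFileSeek(file, frame) != noErr)
        return -1;

    SInt64 current = 0;
    if (ExtAudioFileTell(file, &current) != noErr)
        return -1;

    return current; // may differ from 'frame' for some compressed formats
}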
Example 5
/* Note: I found this tech note:
 http://developer.apple.com/library/mac/#qa/qa2009/qa1678.html
 I don't know if this applies to us. So far, I haven't noticed the problem,
 so I haven't applied any of the techniques.
 */
static int CoreAudio_seek(Sound_Sample *sample, Uint32 ms)
{
	OSStatus error_result = noErr;	
	Sound_SampleInternal *internal = (Sound_SampleInternal *) sample->opaque;
	CoreAudioFileContainer* core_audio_file_container = (CoreAudioFileContainer *) internal->decoder_private;
	SInt64 frame_offset = 0;
	AudioStreamBasicDescription	actual_format;
	UInt32 format_size;

	
	/* I'm confused. The Apple documentation says this:
	"Seek position is specified in the sample rate and frame count of the file’s audio data format
	— not your application’s audio data format."
	My interpretation is that I want to get the "actual format of the file and compute the frame offset.
	But when I try that, seeking goes to the wrong place.
	When I use outputFormat, things seem to work correctly.
	I must be misinterpreting the documentation or doing something wrong.
	*/
#if 0 /* not working */
    format_size = sizeof(AudioStreamBasicDescription);
    error_result = AudioFileGetProperty(
		*core_audio_file_container->audioFileID,
		kAudioFilePropertyDataFormat,
		&format_size,
		&actual_format
	);
    if(error_result != noErr)
	{
		sample->flags |= SOUND_SAMPLEFLAG_ERROR;
		BAIL_MACRO("Core Audio: Could not GetProperty for kAudioFilePropertyDataFormat.", 0);
	} /* if */

	// packetIndex = (pos * sampleRate) / framesPerPacket
	//	frame_offset = (SInt64)((ms/1000.0 * actual_format.mSampleRate) / actual_format.mFramesPerPacket);
#else /* seems to work, but I'm confused */
	// packetIndex = (pos * sampleRate) / framesPerPacket
	frame_offset = (SInt64)((ms/1000.0 * core_audio_file_container->outputFormat->mSampleRate) / core_audio_file_container->outputFormat->mFramesPerPacket);	
#endif

	// the frame offset above was computed against the client (output) format; see the note on the documentation above
	error_result = ExtAudioFileSeek(core_audio_file_container->extAudioFileRef, frame_offset);
	if(error_result != noErr)
	{
		sample->flags |= SOUND_SAMPLEFLAG_ERROR;
	}
	
	return(1);
} /* CoreAudio_seek */
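Apple's documentation (as quoted in the snippet) says the seek offset should be in the file data format's frames, but the snippet's author found the client (output) format to work in practice; with a linear PCM client format mFramesPerPacket is 1, so the division is effectively a no-op. A small sketch that just factors out the conversion the working branch performs, querying the client format straight from the ExtAudioFile (names are illustrative):

#include <AudioToolbox/AudioToolbox.h>

/* Sketch: turn a millisecond position into a frame offset using the client
   data format that was previously set on the ExtAudioFile. */
static SInt64 MillisecondsToClientFrames(ExtAudioFileRef file, UInt32 ms)
{
    AudioStreamBasicDescription clientFormat;
    UInt32 size = sizeof(clientFormat);

    if (ExtAudioFileGetProperty(file, kExtAudioFileProperty_ClientDataFormat,
                                &size, &clientFormat) != noErr)
        return -1;

    return (SInt64)((ms / 1000.0) * clientFormat.mSampleRate);
}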
Example 6
int AudioDecoderCoreAudio::seek(int sampleIdx) {
    OSStatus err = noErr;
    SInt64 segmentStart = sampleIdx / 2; // sampleIdx counts interleaved stereo samples; /2 converts to frames

    err = ExtAudioFileSeek(m_audioFile, (SInt64)segmentStart+m_headerFrames);
    //_ThrowExceptionIfErr(@"ExtAudioFileSeek", err);
	//qDebug() << "SSCA: Seeking to" << segmentStart;

	//err = ExtAudioFileSeek(m_audioFile, sampleIdx / 2);		
	if (err != noErr)
	{
        std::cerr << "AudioDecoderCoreAudio: Error seeking to sample " << sampleIdx << std::endl;
	}

    m_iPositionInSamples = sampleIdx;

    return m_iPositionInSamples; //filepos;
}
Example 7
long SoundSourceCoreAudio::seek(long filepos) {
    // important division here: filepos is in audio samples (i.e. shorts),
    // but Core Audio expects a number in frames. I _think_ this should be
    // hard-coded at two because *2 is the assumption the caller makes
    // -- bkgood
    OSStatus err = noErr;
    SInt64 segmentStart = filepos / 2;

    err = ExtAudioFileSeek(m_audioFile, (SInt64)segmentStart+m_headerFrames);
    //_ThrowExceptionIfErr(@"ExtAudioFileSeek", err);
    //qDebug() << "SSCA: Seeking to" << segmentStart;

	//err = ExtAudioFileSeek(m_audioFile, filepos / 2);
    if (err != noErr) {
        qDebug() << "SSCA: Error seeking to" << filepos << " (file " << m_qFilename << ")";// << GetMacOSStatusErrorString(err) << GetMacOSStatusCommentString(err);
	}
    return filepos;
}
Example 8
SINT SoundSourceCoreAudio::seekSampleFrame(SINT frameIndex) {
    DEBUG_ASSERT(isValidFrameIndex(frameIndex));

    // See comments above on kMp3StabilizationFrames.
    const SINT stabilization_frames = m_bFileIsMp3 ? math_min<SINT>(
            kMp3StabilizationFrames, frameIndex + m_headerFrames) : 0;
    OSStatus err = ExtAudioFileSeek(
            m_audioFile, frameIndex + m_headerFrames - stabilization_frames);
    if (stabilization_frames > 0) {
        readSampleFrames(stabilization_frames,
                         &kMp3StabilizationScratchBuffer[0]);
    }

    //_ThrowExceptionIfErr(@"ExtAudioFileSeek", err);
    //qDebug() << "SSCA: Seeking to" << frameIndex;
    if (err != noErr) {
        qDebug() << "SSCA: Error seeking to" << frameIndex; // << GetMacOSStatusErrorString(err) << GetMacOSStatusCommentString(err);
    }
    return frameIndex;
}
Example 9
void ExtAFSource::seekTo(int64_t count)
{
    int npreroll = 0;
    
    switch (m_iasbd.mFormatID) {
    case kAudioFormatMPEGLayer1: npreroll = 1; break;
    case kAudioFormatMPEGLayer2: npreroll = 1; break;
    case kAudioFormatMPEGLayer3: npreroll = 10; break;
    }
    int64_t off
        = std::max(0LL, count - m_iasbd.mFramesPerPacket * npreroll);
    CHECKCA(ExtAudioFileSeek(m_eaf, off));
    int32_t distance = count - off;
    while (distance > 0) {
        size_t nbytes = distance * m_asbd.mBytesPerFrame;
        if (nbytes > m_buffer.size())
            m_buffer.resize(nbytes);
        size_t n = readSamples(&m_buffer[0], distance);
        if (n <= 0) break;
        distance -= n;
    }
}
Example 10
OSStatus DoConvertFile(CFURLRef sourceURL, CFURLRef destinationURL, OSType outputFormat, Float64 outputSampleRate)
{
    ExtAudioFileRef sourceFile = 0;
    ExtAudioFileRef destinationFile = 0;
    Boolean         canResumeFromInterruption = true; // we can continue unless told otherwise
    OSStatus        error = noErr;
    
    // in this sample we should never be on the main thread here
    assert(![NSThread isMainThread]);
    
    // transition thread state to kStateRunning before continuing
    ThreadStateSetRunning();
    
    printf("DoConvertFile\n");
    
	try {
        CAStreamBasicDescription srcFormat, dstFormat;

        // open the source file
        XThrowIfError(ExtAudioFileOpenURL(sourceURL, &sourceFile), "ExtAudioFileOpenURL failed");
			
        // get the source data format
		UInt32 size = sizeof(srcFormat);
		XThrowIfError(ExtAudioFileGetProperty(sourceFile, kExtAudioFileProperty_FileDataFormat, &size, &srcFormat), "couldn't get source data format");
		
		printf("\nSource file format: "); srcFormat.Print();

        // setup the output file format
        dstFormat.mSampleRate = (outputSampleRate == 0 ? srcFormat.mSampleRate : outputSampleRate); // set sample rate
        if (outputFormat == kAudioFormatLinearPCM) {
            // if PCM was selected as the destination format, create a 16-bit int PCM file format description
            dstFormat.mFormatID = outputFormat;
            dstFormat.mChannelsPerFrame = srcFormat.NumberChannels();
            dstFormat.mBitsPerChannel = 16;
            dstFormat.mBytesPerPacket = dstFormat.mBytesPerFrame = 2 * dstFormat.mChannelsPerFrame;
            dstFormat.mFramesPerPacket = 1;
            dstFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger; // little-endian
        } else {
            // compressed format - need to set at least format, sample rate and channel fields for kAudioFormatProperty_FormatInfo
            dstFormat.mFormatID = outputFormat;
            dstFormat.mChannelsPerFrame =  (outputFormat == kAudioFormatiLBC ? 1 : srcFormat.NumberChannels()); // for iLBC num channels must be 1
            
            // use AudioFormat API to fill out the rest of the description
            size = sizeof(dstFormat);
            XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &size, &dstFormat), "couldn't create destination data format");
        }
        
        printf("\nDestination file format: "); dstFormat.Print();
        
        // create the destination file 
        XThrowIfError(ExtAudioFileCreateWithURL(destinationURL, kAudioFileCAFType, &dstFormat, NULL, kAudioFileFlags_EraseFile, &destinationFile), "ExtAudioFileCreateWithURL failed!");

        // set the client format - The format must be linear PCM (kAudioFormatLinearPCM)
        // You must set this in order to encode or decode a non-PCM file data format
        // You may set this on PCM files to specify the data format used in your calls to read/write
        CAStreamBasicDescription clientFormat;
        if (outputFormat == kAudioFormatLinearPCM) {
            clientFormat = dstFormat;
        } else {
            clientFormat.SetCanonical(srcFormat.NumberChannels(), true);
            clientFormat.mSampleRate = srcFormat.mSampleRate;
        }
        
        printf("\nClient data format: "); clientFormat.Print();
        printf("\n");
        
        size = sizeof(clientFormat);
        XThrowIfError(ExtAudioFileSetProperty(sourceFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat), "couldn't set source client format");
        
        size = sizeof(clientFormat);
        XThrowIfError(ExtAudioFileSetProperty(destinationFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat), "couldn't set destination client format");

        // can the audio converter (which in this case is owned by an ExtAudioFile object) resume conversion after an interruption?
        AudioConverterRef audioConverter;
                    
        size = sizeof(audioConverter);
        XThrowIfError(ExtAudioFileGetProperty(destinationFile, kExtAudioFileProperty_AudioConverter, &size, &audioConverter), "Couldn't get Audio Converter!");
        
        // this property may be queried at any time after construction of the audio converter (which in this case is owned by an ExtAudioFile object)
        // after setting the output format -- there's no clear reason to prefer construction time, interruption time, or potential resumption time but we prefer
        // construction time since it means less code to execute during or after interruption time
        UInt32 canResume = 0;
        size = sizeof(canResume);
        error = AudioConverterGetProperty(audioConverter, kAudioConverterPropertyCanResumeFromInterruption, &size, &canResume);
        if (noErr == error) {
            // we received a valid return value from the GetProperty call
            // if the property's value is 1, then the codec CAN resume work following an interruption
            // if the property's value is 0, then interruptions destroy the codec's state and we're done
            
            if (0 == canResume) canResumeFromInterruption = false;
            
            printf("Audio Converter %s continue after interruption!\n", (canResumeFromInterruption == 0 ? "CANNOT" : "CAN"));
        } else {
            // if the property is unimplemented (kAudioConverterErr_PropertyNotSupported, or paramErr returned in the case of PCM),
            // then the codec being used is not a hardware codec so we're not concerned about codec state
            // we are always going to be able to resume conversion after an interruption
            
            if (kAudioConverterErr_PropertyNotSupported == error) {
                printf("kAudioConverterPropertyCanResumeFromInterruption property not supported!\n");
            } else {
                printf("AudioConverterGetProperty kAudioConverterPropertyCanResumeFromInterruption result %ld\n", error);
            }
            
            error = noErr;
        }
        
        // set up buffers
        UInt32 bufferByteSize = 32768;
        char srcBuffer[bufferByteSize];
        
        // keep track of the source file offset so we know where to reset the source for
        // reading if interrupted and input was not consumed by the audio converter
        SInt64 sourceFrameOffset = 0;
        
        //***** do the read and write - the conversion is done on and by the write call *****//
        printf("Converting...\n");
        while (1) {
        
            AudioBufferList fillBufList;
            fillBufList.mNumberBuffers = 1;
            fillBufList.mBuffers[0].mNumberChannels = clientFormat.NumberChannels();
            fillBufList.mBuffers[0].mDataByteSize = bufferByteSize;
            fillBufList.mBuffers[0].mData = srcBuffer;
                
            // client format is always linear PCM - so here we determine how many frames of lpcm
            // we can read/write given our buffer size
            UInt32 numFrames;
            if (clientFormat.mBytesPerFrame > 0) // rids bogus analyzer div by zero warning mBytesPerFrame can't be 0 and is protected by an Assert
                numFrames = clientFormat.BytesToFrames(bufferByteSize); // (bufferByteSize / clientFormat.mBytesPerFrame);

            XThrowIfError(ExtAudioFileRead(sourceFile, &numFrames, &fillBufList), "ExtAudioFileRead failed!");	
            if (!numFrames) {
                // this is our termination condition
                error = noErr;
                break;
            }
            sourceFrameOffset += numFrames;
            
            // this will block if we're interrupted
            Boolean wasInterrupted = ThreadStatePausedCheck();
            
            if ((error || wasInterrupted) && (false == canResumeFromInterruption)) {
                // this is our interruption termination condition
                // an interruption has occurred but the audio converter cannot continue
                error = kMyAudioConverterErr_CannotResumeFromInterruptionError;
                break;
            }

            error = ExtAudioFileWrite(destinationFile, numFrames, &fillBufList);
            // if interrupted in the process of the write call, we must handle the errors appropriately
            if (error) {
                if (kExtAudioFileError_CodecUnavailableInputConsumed == error) {
                
                    printf("ExtAudioFileWrite kExtAudioFileError_CodecUnavailableInputConsumed error %ld\n", error);
                    
                    /*
                        Returned when ExtAudioFileWrite was interrupted. You must stop calling
                        ExtAudioFileWrite. If the underlying audio converter can resume after an
                        interruption (see kAudioConverterPropertyCanResumeFromInterruption), you must
                        wait for an EndInterruption notification from AudioSession, then activate the session
                        before resuming. In this situation, the buffer you provided to ExtAudioFileWrite was successfully
                        consumed and you may proceed to the next buffer
                    */
                    
                } else if (kExtAudioFileError_CodecUnavailableInputNotConsumed == error) {
                
                    printf("ExtAudioFileWrite kExtAudioFileError_CodecUnavailableInputNotConsumed error %ld\n", error);
                    
                    /*
                        Returned when ExtAudioFileWrite was interrupted. You must stop calling
                        ExtAudioFileWrite. If the underlying audio converter can resume after an
                        interruption (see kAudioConverterPropertyCanResumeFromInterruption), you must
                        wait for an EndInterruption notification from AudioSession, then activate the session
                        before resuming. In this situation, the buffer you provided to ExtAudioFileWrite was not
                        successfully consumed and you must try to write it again
                    */
                    
                    // seek back to last offset before last read so we can try again after the interruption
                    sourceFrameOffset -= numFrames;
                    XThrowIfError(ExtAudioFileSeek(sourceFile, sourceFrameOffset), "ExtAudioFileSeek failed!");
                    
                } else {
                    XThrowIfError(error, "ExtAudioFileWrite error!");
                }
            } // if
        } // while
	}
    catch (CAXException e) {
		char buf[256];
		fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
        error = e.mError;
	}
    
    // close
    if (destinationFile) ExtAudioFileDispose(destinationFile);
    if (sourceFile) ExtAudioFileDispose(sourceFile);

    // transition thread state to kStateDone before continuing
    ThreadStateSetDone();
    
    return error;
}
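A hypothetical call site for DoConvertFile, with made-up paths and an arbitrary output format; the sample's ThreadState* helpers must already be set up, and the call must happen off the main thread (the function asserts on this):

#include <stdio.h>
#include <CoreFoundation/CoreFoundation.h>
#include <AudioToolbox/AudioToolbox.h>

// Illustrative only: the file paths, output format, and sample rate are invented.
static void ConvertExample(void)
{
    CFURLRef src = CFURLCreateWithFileSystemPath(kCFAllocatorDefault,
                                                 CFSTR("/tmp/input.m4a"),
                                                 kCFURLPOSIXPathStyle, false);
    CFURLRef dst = CFURLCreateWithFileSystemPath(kCFAllocatorDefault,
                                                 CFSTR("/tmp/output.caf"),
                                                 kCFURLPOSIXPathStyle, false);

    OSStatus err = DoConvertFile(src, dst, kAudioFormatLinearPCM, 44100.0);
    if (err != noErr)
        printf("conversion failed: %d\n", (int)err);

    CFRelease(src);
    CFRelease(dst);
}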
Example 11
bool nuiAudioDecoder::Seek(uint64 SampleFrame)
{
  OSStatus err = ExtAudioFileSeek(mpPrivate->mExtAudioFileRef, SampleFrame);
  return (err == noErr);
}
Example 12
uint_t aubio_source_apple_audio_seek (aubio_source_apple_audio_t * s, uint_t pos) {
  SInt64 resampled_pos = (SInt64)ROUND( pos * s->source_samplerate * 1. / s->samplerate );
  OSStatus err = ExtAudioFileSeek(s->audioFile, resampled_pos);
  if (err) AUBIO_ERROR("source_apple_audio: error in ExtAudioFileSeek (%d)\n", (int)err);
  return err;
}