Example #1
static PaTime GetStreamTime( PaStream *s )
{
    OSStatus err;
    PaTime result;
    PaMacCoreStream *stream = (PaMacCoreStream*)s;

    AudioTimeStamp *timeStamp = PaUtil_AllocateMemory(sizeof(AudioTimeStamp));
    if (stream->inputDevice != kAudioDeviceUnknown) {
        err = AudioDeviceGetCurrentTime(stream->inputDevice, timeStamp);
    }
    else {
        err = AudioDeviceGetCurrentTime(stream->outputDevice, timeStamp);
    }
    
    result = err ? 0 : timeStamp->mSampleTime;
    PaUtil_FreeMemory(timeStamp);

    return result;
}
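The PortAudio routine above reports the device's current sample time as the stream time, falling back to 0 on error. It heap-allocates the AudioTimeStamp with PaUtil_AllocateMemory and dereferences it without a NULL check; when only the current time is needed, a stack-allocated timestamp is simpler. A minimal sketch (the helper name is illustrative, not part of PortAudio):

#include <CoreAudio/CoreAudio.h>

static Float64 CurrentSampleTimeOrZero( AudioDeviceID device )
{
    AudioTimeStamp timeStamp;   /* stack allocation: no PaUtil_AllocateMemory/FreeMemory, no NULL check needed */
    OSStatus err = AudioDeviceGetCurrentTime( device, &timeStamp );
    return err ? 0.0 : timeStamp.mSampleTime;   /* 0 on error, as in GetStreamTime above */
}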
Example #2
// insert up to frameCount (and no less than frameCount/2 -- see SoundPlayer
// class>>startPlayingImmediately: for the [bogus] reasons why) frames into
// the front and back buffers, leaving some number of framesOfLeadTime
// intact before starting the insertion.  (this last parameter is
// meaningless for us and could be reduced to zero, but ignoring it causes
// strange things to happen.  time to rethink the image code, methinks.)
// 
// Note: this is only used when the "sound quick start" preference is
// enabled in the image.
// 
static sqInt sound_InsertSamplesFromLeadTime(sqInt frameCount, sqInt srcBufPtr, sqInt framesOfLeadTime)
{
  Stream *s= output;

  debugf("snd_InsertSamples %d From %p LeadTime %d\n", frameCount, srcBufPtr, framesOfLeadTime);

  if (s)
    {
      // data already sent to the device is lost forever, although latency
      // is only a few hundred frames (and is certainly much lower than the
      // standard value of `framesOfLeadTime').  instead of putzing around
      // why not just mix the samples in right away, leaving one h/w
      // buffer's worth of lead time in case we're interrupted in the
      // middle?

      char *frontData=   0, *backData=   0;
      int   frontFrames= 0,  backFrames= 0;
      int   framesDone=  0;
      int   leadBytes;

#    if (OBEY_LEAD_TIME)
      {
	AudioTimeStamp timeStamp;
	u_int64_t      then, now;

	timeStamp.mFlags= kAudioTimeStampHostTimeValid;
	checkError(AudioDeviceGetCurrentTime(s->id, &timeStamp),
		   "AudioDeviceGetCurrentTime", "");
	now= AudioConvertHostTimeToNanos(timeStamp.mHostTime) / 1000ull;
	then= s->timestamp;
	leadBytes= ( ((now - then) * (u_int64_t)s->sampleRate) / 1000000ull
		     + framesOfLeadTime ) * SqueakFrameSize;
      }
#    else
      {
	leadBytes= s->devBufSize;	// quantum shipped to the hardware
      }
#    endif

      {
	int   availBytes;
	int   byteCount= frameCount * SqueakFrameSize;
	Buffer_getOutputPointers(s->buffer,
				 &frontData, &frontFrames,	// bytes!
				 &backData,  &backFrames);	// bytes!
	availBytes= frontFrames + backFrames;
	// don't consume more than frameCount - 1 frames
	leadBytes= max(leadBytes, availBytes - byteCount + SqueakFrameSize);

	assert((availBytes - leadBytes) <  (byteCount));

	if (leadBytes < frontFrames)	// skip leadBytes into first fragment
	  {
	    frontData   += leadBytes;
	    frontFrames -= leadBytes;
	  }
	else				// omit the first fragment
	  {
	    leadBytes -= frontFrames;	// lead in second fragment
	    frontFrames= 0;
	    backData   += leadBytes;	// skip leadBytes into second fragment
	    backFrames -= leadBytes;
	  }
	frontFrames /= SqueakFrameSize;
	backFrames  /= SqueakFrameSize;
      }

      assert((frontFrames + backFrames) < frameCount);	// avoid bug in image

      if ((frontFrames + backFrames) >= (frameCount / 2))
	{
	  mixFrames((short *)frontData, (short *)pointerForOop(srcBufPtr), frontFrames);
	  srcBufPtr += frontFrames * SqueakFrameSize;
	  mixFrames((short *)backData,  (short *)pointerForOop(srcBufPtr), backFrames);
	  framesDone= frontFrames + backFrames;
	}
      return framesDone;
    }

  success(false);
  return frameCount;
}
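Under OBEY_LEAD_TIME the lead-time branch above converts elapsed host time into a byte offset: the host timestamp is converted to microseconds, the elapsed microseconds are scaled by the device sample rate and divided by 10^6 to get elapsed frames, framesOfLeadTime is added, and the sum is multiplied by SqueakFrameSize. A standalone sketch of just that arithmetic (all names and values below are illustrative; a frame size of 4 bytes assumes 16-bit stereo):

#include <stdint.h>

/* bytes of already-queued audio to leave untouched before mixing in new samples */
static uint64_t leadBytesFor(uint64_t nowUsecs, uint64_t thenUsecs,
                             uint64_t sampleRate, uint64_t framesOfLeadTime,
                             uint64_t frameSize)
{
  uint64_t elapsedFrames = (nowUsecs - thenUsecs) * sampleRate / 1000000ull;
  return (elapsedFrames + framesOfLeadTime) * frameSize;
}

/* e.g. leadBytesFor(1000500, 1000000, 44100, 1024, 4)
   = ((500 * 44100) / 1000000 + 1024) * 4 = (22 + 1024) * 4 = 4184 bytes */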
Example #3
OSStatus CAPlayThrough::OutputProc(void *inRefCon,
									 AudioUnitRenderActionFlags *ioActionFlags,
									 const AudioTimeStamp *TimeStamp,
									 UInt32 inBusNumber,
									 UInt32 inNumberFrames,
									 AudioBufferList * ioData)
{
    OSStatus err = noErr;
	CAPlayThrough *This = (CAPlayThrough *)inRefCon;
	Float64 rate = 0.0;
	AudioTimeStamp inTS, outTS;
		
	if (This->mFirstInputTime < 0.) {
		// input hasn't run yet -> silence
		MakeBufferSilent (ioData);
		return noErr;
	}
	
	//use the varispeed playback rate to offset small discrepancies in sample rate
	//first find the rate scalars of the input and output devices
	err = AudioDeviceGetCurrentTime(This->mInputDevice.mID, &inTS);
	// this callback may still be called a few times after the device has been stopped
	if (err)
	{
		MakeBufferSilent (ioData);
		return noErr;
	}
		
	err = AudioDeviceGetCurrentTime(This->mOutputDevice.mID, &outTS);
	checkErr(err);
	
	rate = inTS.mRateScalar / outTS.mRateScalar;
	err = AudioUnitSetParameter(This->mVarispeedUnit,kVarispeedParam_PlaybackRate,kAudioUnitScope_Global,0, rate,0);
	checkErr(err);
	
	//get Delta between the devices and add it to the offset
	if (This->mFirstOutputTime < 0.) {
		This->mFirstOutputTime = TimeStamp->mSampleTime;
		Float64 delta = (This->mFirstInputTime - This->mFirstOutputTime);
		This->ComputeThruOffset();   
		//changed: 3865519 11/10/04
		if (delta < 0.0)
			This->mInToOutSampleOffset -= delta;
		else
			This->mInToOutSampleOffset = -delta + This->mInToOutSampleOffset;
					
		MakeBufferSilent (ioData);
		return noErr;
	}

	//copy the data from the buffers	
	err = This->mBuffer->Fetch(ioData, inNumberFrames, SInt64(TimeStamp->mSampleTime - This->mInToOutSampleOffset));
	//old call, kept for reference -- the signature differs once the ring buffer implementation is replaced
	//err = This->mBuffer->Fetch(ioData, inNumberFrames, SInt64(TimeStamp->mSampleTime - This->mInToOutSampleOffset), false);	
	if(err != kCARingBufferError_OK)
	{
		MakeBufferSilent (ioData);
		SInt64 bufferStartTime, bufferEndTime;
		This->mBuffer->GetTimeBounds(bufferStartTime, bufferEndTime);
		This->mInToOutSampleOffset = TimeStamp->mSampleTime - bufferStartTime;
	}

	return noErr;
}
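The drift compensation above hinges on the two devices' rate scalars: mRateScalar reports how fast a device's clock is actually running relative to its nominal sample rate, so inTS.mRateScalar / outTS.mRateScalar is the relative drift between input and output, which is handed to the varispeed unit as its playback rate. A minimal sketch of just that step (the device IDs and the varispeed AudioUnit are assumed to already exist; error handling is reduced to early returns):

#include <AudioUnit/AudioUnit.h>
#include <CoreAudio/CoreAudio.h>

static OSStatus UpdateVarispeedRate( AudioDeviceID inDev, AudioDeviceID outDev, AudioUnit varispeed )
{
	AudioTimeStamp inTS, outTS;
	OSStatus err = AudioDeviceGetCurrentTime( inDev, &inTS );
	if (err) return err;                       // the device may already be stopped
	err = AudioDeviceGetCurrentTime( outDev, &outTS );
	if (err) return err;

	// ratio of the measured clock rates of the two devices
	Float64 rate = inTS.mRateScalar / outTS.mRateScalar;
	return AudioUnitSetParameter( varispeed, kVarispeedParam_PlaybackRate,
	                              kAudioUnitScope_Global, 0, rate, 0 );
}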
Example #4
void	CAHALAudioDevice::GetCurrentTime(AudioTimeStamp& outTime)
{
	OSStatus theError = AudioDeviceGetCurrentTime(mObjectID, &outTime);
	ThrowIfError(theError, CAException(theError), "CAHALAudioDevice::GetCurrentTime: got an error getting the current time");
}
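CAHALAudioDevice::GetCurrentTime wraps the same call but throws a CAException instead of returning the OSStatus, so callers wrap it in a try/catch. A minimal usage sketch (assuming `device` is an already-constructed CAHALAudioDevice):

AudioTimeStamp now;
try {
	device.GetCurrentTime(now);    // fills `now`; throws CAException on error
	// now.mSampleTime / now.mHostTime can feed latency or drift calculations
} catch (const CAException& e) {
	// the device may be stopped or removed; recover or report the error here
}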
OSStatus CAPlayThrough::OutputProc(void *inRefCon,
									 AudioUnitRenderActionFlags *ioActionFlags,
									 const AudioTimeStamp *TimeStamp,
									 UInt32 inBusNumber,
									 UInt32 inNumberFrames,
									 AudioBufferList * ioData)
{
    OSStatus err = noErr;
	CAPlayThrough *This = (CAPlayThrough *)inRefCon;
	Float64 rate = 0.0;
	AudioTimeStamp inTS, outTS;
		
	if (This->mFirstInputTime < 0.) {
		// input hasn't run yet -> silence
		MakeBufferSilent (ioData);
		return noErr;
	}
	
	//use the varispeed playback rate to offset small discrepancies in sample rate
	//first find the rate scalars of the input and output devices
	err = AudioDeviceGetCurrentTime(This->mInputDevice.mID, &inTS);
	// this callback may still be called a few times after the device has been stopped
	if (err)
	{
		MakeBufferSilent (ioData);
		return noErr;
	}
		
	err = AudioDeviceGetCurrentTime(This->mOutputDevice.mID, &outTS);
	checkErr(err);
	
	rate = inTS.mRateScalar / outTS.mRateScalar;
	err = AudioUnitSetParameter(This->mVarispeedUnit,kVarispeedParam_PlaybackRate,kAudioUnitScope_Global,0, rate,0);
	checkErr(err);
	
	//get Delta between the devices and add it to the offset
	if (This->mFirstOutputTime < 0.) {
		This->mFirstOutputTime = TimeStamp->mSampleTime;
		Float64 delta = (This->mFirstInputTime - This->mFirstOutputTime);
		This->ComputeThruOffset();   
		//changed: 3865519 11/10/04
		if (delta < 0.0)
			This->mInToOutSampleOffset -= delta;
		else
			This->mInToOutSampleOffset = -delta + This->mInToOutSampleOffset;
		
        CAPT_DEBUG( "Set initial IOOffset to %f.\n", This->mInToOutSampleOffset );
                                			
		MakeBufferSilent (ioData);
		return noErr;
	}

	//copy the data from the buffers	
	err = This->mBuffer->Fetch(ioData, inNumberFrames, SInt64(TimeStamp->mSampleTime - This->mInToOutSampleOffset));	
	if( err != kCARingBufferError_OK ) {
        SInt64 bufferStartTime, bufferEndTime;
		This->mBuffer->GetTimeBounds( bufferStartTime, bufferEndTime );
        CAPT_DEBUG( "Oops. Adjusting IOOffset from %f, ", This->mInToOutSampleOffset );
        if ( err < kCARingBufferError_OK ) {
            CAPT_DEBUG( "ahead " );
            if ( err == kCARingBufferError_WayBehind ) {
                MakeBufferSilent( ioData );
            }
            This->mInToOutSampleOffset += std::max( ( TimeStamp->mSampleTime - This->mInToOutSampleOffset ) - bufferStartTime, kAdjustmentOffsetSamples );
        }
        else if ( err > kCARingBufferError_OK ) {
            CAPT_DEBUG( "behind " );
            if ( err == kCARingBufferError_WayAhead ) {
                MakeBufferSilent( ioData );
            }
            // Adjust by the amount that we read past in the buffer
            This->mInToOutSampleOffset += std::max( ( ( TimeStamp->mSampleTime - This->mInToOutSampleOffset ) + inNumberFrames ) - bufferEndTime, kAdjustmentOffsetSamples );
        }
        CAPT_DEBUG( "to %f.\n", This->mInToOutSampleOffset );
		MakeBufferSilent ( ioData );
	}

	return noErr;
}