Example #1
/* Steps the output volume of the given device down one entry in the
   kAudioOutputVolumeLevels table, muting once the lowest level is passed.
   When level is non-NULL and the volume could be read, it receives the
   resulting level index. */
OSStatus AudioOutputVolumeDown(AudioDeviceID device, UInt32 *level) {
  Float32 right, left;
  OSStatus err = AudioOutputGetVolume(device, &left, &right);
  if (noErr == err) {
    /* use the louder channel to determine the current level */
    Float32 max = left > right ? left : right;
    UInt32 lvl = __AudioOutputVolumeGetLevel(max);
    if (0 == lvl) {
      /* already at the lowest level: if the volume is not yet silent, mute it */
      if (fnonzero(max)) {
        err = _AudioOutputSetVolume(device, left, right, 0);
      }
    } else {
      lvl--;
      check(lvl <= kAudioOutputVolumeMaxLevel);
      err = _AudioOutputSetVolume(device, left, right, kAudioOutputVolumeLevels[lvl]);
    }
    if (level) *level = lvl;
  }
  return err;
}
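
A minimal caller sketch, not part of the sample above: it resolves the default output device with the public AudioObject API and then calls the AudioOutputVolumeDown helper from Example #1. The wrapper name StepDefaultOutputVolumeDown is hypothetical.

#include <CoreAudio/CoreAudio.h>

/* Hypothetical wrapper: look up the default output device via the public
   AudioObject API, then step its volume down with the helper above. */
static OSStatus StepDefaultOutputVolumeDown(UInt32 *newLevel) {
  AudioObjectPropertyAddress addr = {
    kAudioHardwarePropertyDefaultOutputDevice,
    kAudioObjectPropertyScopeGlobal,
    kAudioObjectPropertyElementMaster
  };
  AudioDeviceID device = kAudioObjectUnknown;
  UInt32 size = sizeof(device);
  OSStatus err = AudioObjectGetPropertyData(kAudioObjectSystemObject, &addr,
                                            0, NULL, &size, &device);
  if (noErr != err) return err;
  return AudioOutputVolumeDown(device, newLevel);
}
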
Example #2
//	Orders two AudioStreamBasicDescriptions: linear PCM sorts before other
//	format IDs and mixable before non-mixable; otherwise floating-point
//	samples, deeper bit depths, higher sample rates, and more channels
//	compare greater. A field that is zero on either side is treated as a
//	wildcard and skipped.
bool	operator<(const AudioStreamBasicDescription& x, const AudioStreamBasicDescription& y)
{
	bool theAnswer = false;
	bool isDone = false;
	
	//	note that if either side is 0, that field is skipped
	
	//	format ID is the first order sort
	if((!isDone) && ((x.mFormatID != 0) && (y.mFormatID != 0)))
	{
		if(x.mFormatID != y.mFormatID)
		{
			//	formats are sorted numerically except that linear
			//	PCM is always first
			if(x.mFormatID == kAudioFormatLinearPCM)
			{
				theAnswer = true;
			}
			else if(y.mFormatID == kAudioFormatLinearPCM)
			{
				theAnswer = false;
			}
			else
			{
				theAnswer = x.mFormatID < y.mFormatID;
			}
			isDone = true;
		}
	}
	
	//	mixable is always better than non-mixable for linear PCM and should
	//	be the second order sort item
	if((!isDone) && ((x.mFormatID == kAudioFormatLinearPCM) && (y.mFormatID == kAudioFormatLinearPCM)))
	{
		if(((x.mFormatFlags & kIsNonMixableFlag) == 0) && ((y.mFormatFlags & kIsNonMixableFlag) != 0))
		{
			theAnswer = true;
			isDone = true;
		}
		else if(((x.mFormatFlags & kIsNonMixableFlag) != 0) && ((y.mFormatFlags & kIsNonMixableFlag) == 0))
		{
			theAnswer = false;
			isDone = true;
		}
	}
	
	//	floating point vs integer for linear PCM only
	if((!isDone) && ((x.mFormatID == kAudioFormatLinearPCM) && (y.mFormatID == kAudioFormatLinearPCM)))
	{
		if((x.mFormatFlags & kAudioFormatFlagIsFloat) != (y.mFormatFlags & kAudioFormatFlagIsFloat))
		{
			//	floating point is better than integer
			theAnswer = (y.mFormatFlags & kAudioFormatFlagIsFloat) != 0;
			isDone = true;
		}
	}
	
	//	bit depth
	if((!isDone) && ((x.mBitsPerChannel != 0) && (y.mBitsPerChannel != 0)))
	{
		if(x.mBitsPerChannel != y.mBitsPerChannel)
		{
			//	deeper bit depths are higher quality
			theAnswer = x.mBitsPerChannel < y.mBitsPerChannel;
			isDone = true;
		}
	}
	
	//	sample rate
	if((!isDone) && fnonzero(x.mSampleRate) && fnonzero(y.mSampleRate))
	{
		if(fnotequal(x.mSampleRate, y.mSampleRate))
		{
			//	higher sample rates are higher quality
			theAnswer = x.mSampleRate < y.mSampleRate;
			isDone = true;
		}
	}
	
	//	number of channels
	if((!isDone) && ((x.mChannelsPerFrame != 0) && (y.mChannelsPerFrame != 0)))
	{
		if(x.mChannelsPerFrame != y.mChannelsPerFrame)
		{
			//	more channels is higher quality
			theAnswer = x.mChannelsPerFrame < y.mChannelsPerFrame;
			isDone = true;
		}
	}
	
	return theAnswer;
}
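
A hedged usage sketch, not from the original source: assuming the comparator above is visible in the translation unit, it builds two otherwise identical linear PCM descriptions and compares them; by the bit-depth rule, the 16-bit description compares less than the 24-bit one.

#include <CoreAudio/CoreAudioTypes.h>
#include <stdio.h>
#include <string.h>

int main()
{
	AudioStreamBasicDescription a, b;
	memset(&a, 0, sizeof(a));
	memset(&b, 0, sizeof(b));

	//	two linear PCM descriptions that differ only in bit depth
	a.mFormatID = kAudioFormatLinearPCM;
	a.mSampleRate = 44100.0;
	a.mChannelsPerFrame = 2;
	a.mBitsPerChannel = 16;

	b = a;
	b.mBitsPerChannel = 24;

	//	the shallower bit depth compares less, so this prints "true"
	printf("16-bit < 24-bit: %s\n", (a < b) ? "true" : "false");
	return 0;
}
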
Example #3
// Derives the input AudioTimeStamp corresponding to the current output
// timestamp: the rate scalar, host time, word clock, and SMPTE fields are
// propagated, and the input sample time is resynchronized after an output
// discontinuity, either from the host-time delta or from the detected
// sample delta, depending on mHostTimeDiscontinuityCorrection.
const AudioTimeStamp &	AUTimestampGenerator::GenerateInputTime(Float64 framesToAdvance, double inputSampleRate)
{
	if (mBypassed)
		return mCurrentOutputTime;

	double inputSampleTime;
	
	mCurrentInputTime.mFlags = kAudioTimeStampSampleTimeValid;
	double rateScalar = 1.0;
	
	// propagate rate scalar
	if (mCurrentOutputTime.mFlags & kAudioTimeStampRateScalarValid) {
		mCurrentInputTime.mFlags |= kAudioTimeStampRateScalarValid;
		mCurrentInputTime.mRateScalar = rateScalar = mCurrentOutputTime.mRateScalar;
	}
	
	// propagate host time and sample time
	if (mCurrentOutputTime.mFlags & kAudioTimeStampHostTimeValid) {
		mCurrentInputTime.mFlags |= kAudioTimeStampHostTimeValid;
		mCurrentInputTime.mHostTime = mCurrentOutputTime.mHostTime;
		if (mHostTimeDiscontinuityCorrection && mDiscontinuous && (mLastOutputTime.mFlags & kAudioTimeStampHostTimeValid)) {
			// we had a discontinuous output time, need to resync by interpolating 
			// a sample time that is appropriate to the host time
			UInt64 deltaHostTime = mCurrentOutputTime.mHostTime - mLastOutputTime.mHostTime;
			double deltaSeconds = double(deltaHostTime) * CAHostTimeBase::GetInverseFrequency();
			// samples/second * seconds = samples
			double deltaSamples = floor(inputSampleRate / rateScalar * deltaSeconds + 0.5);
			double lastInputSampleTime = mCurrentInputTime.mSampleTime;
			inputSampleTime = lastInputSampleTime + deltaSamples;
#if DEBUG
			if (mVerbosity > 1)
				printf("%-20.20s: adjusted input time: "TSGFMT" -> "TSGFMT" (SR=%.3f, rs=%.3f)\n", mDebugName, (SInt64)lastInputSampleTime, (SInt64)inputSampleTime, inputSampleRate, rateScalar);
#endif
			mDiscontinuous = false;
		} else {
			inputSampleTime = mNextInputSampleTime;
		}
	} else {
		// we don't know the host time, so we can't do much
		inputSampleTime = mNextInputSampleTime;
	}

	if (!mHostTimeDiscontinuityCorrection && fnonzero(mDiscontinuityDeltaSamples))
	{
		// we had a discontinuous output time, need to resync by propagating the
		// detected discontinuity, taking the rate scalar adjustment into account
		inputSampleTime += floor(mDiscontinuityDeltaSamples / mRateScalarAdj + 0.5);
		
#if DEBUG
		if (mVerbosity > 1)
			printf("%-20.20s: adjusted input time: %.0f -> %.0f (SR=%.3f, rs=%.3f, delta=%.0f)\n", mDebugName, mNextInputSampleTime, inputSampleTime, inputSampleRate, mRateScalarAdj, mDiscontinuityDeltaSamples);
#endif
		
		mDiscontinuityDeltaSamples = 0.;
	}
	
	
	// propagate word clock
	if (mCurrentOutputTime.mFlags & kAudioTimeStampWordClockTimeValid) {
		mCurrentInputTime.mFlags |= kAudioTimeStampWordClockTimeValid;
		mCurrentInputTime.mWordClockTime = mCurrentOutputTime.mWordClockTime;
	}
	
	// propagate SMPTE time
	if (mCurrentOutputTime.mFlags & kAudioTimeStampSMPTETimeValid) {
		mCurrentInputTime.mFlags |= kAudioTimeStampSMPTETimeValid;
		mCurrentInputTime.mSMPTETime = mCurrentOutputTime.mSMPTETime;
	}
	
	// store the input sample time and expected next input time
	mCurrentInputTime.mSampleTime = inputSampleTime;
	mNextInputSampleTime = inputSampleTime + framesToAdvance;

#if DEBUG
	if (mVerbosity > 0) {
		printf("%-20.20s: out = "TSGFMT" (%10.3fs)  in = "TSGFMT"  (%10.3fs)  delta = "TSGFMT"  advance = "TSGFMT"\n", mDebugName, (SInt64)mCurrentOutputTime.mSampleTime, DebugHostTime(mCurrentOutputTime), (SInt64)inputSampleTime, DebugHostTime(mCurrentInputTime), (SInt64)(mCurrentOutputTime.mSampleTime - inputSampleTime), (SInt64)framesToAdvance);
	}
#endif
	return mCurrentInputTime;
}
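
A standalone arithmetic sketch of the host-time resync path above. The tick-to-seconds factor, sample rate, and sample times are assumed example values, not anything queried from CAHostTimeBase.

#include <math.h>
#include <stdio.h>

int main()
{
	const double secondsPerHostTick = 1.0e-9;		// assumed: 1 ns per host tick
	const unsigned long long deltaHostTime = 10000000ULL;	// 10 ms worth of ticks
	const double inputSampleRate = 48000.0;
	const double rateScalar = 1.0;

	double deltaSeconds = (double)deltaHostTime * secondsPerHostTick;	// 0.010 s
	// samples/second * seconds = samples, rounded to the nearest frame
	double deltaSamples = floor(inputSampleRate / rateScalar * deltaSeconds + 0.5);	// 480
	double lastInputSampleTime = 12800.0;			// assumed previous input sample time
	double inputSampleTime = lastInputSampleTime + deltaSamples;	// 13280

	printf("resynced input sample time: %.0f\n", inputSampleTime);
	return 0;
}
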