// Build the play-through chain for the given input/output devices.
//
// Two "output" audio units are involved. An AUGraph may contain only one
// output unit, so the AUHAL that captures from the input device is created
// stand-alone (SetupAUHAL), while the unit that drives the output device
// lives inside the graph (SetupGraph).
//
// Returns noErr on success; on failure, checkErr reports the first
// CoreAudio error encountered.
OSStatus CAPlayThrough::Init(AudioDeviceID input, AudioDeviceID output)
{
    OSStatus result = noErr;

    // Stand-alone AUHAL for capturing from the input device.
    result = SetupAUHAL(input);
    checkErr(result);

    // Graph holding the varispeed unit and the default-output unit.
    result = SetupGraph(output);
    checkErr(result);

    result = SetupBuffers();
    checkErr(result);

    // The varispeed unit must be connected only after the input and output
    // stream formats have been established by the setup calls above.
    result = AUGraphConnectNodeInput(mGraph, mVarispeedNode, 0, mOutputNode, 0);
    checkErr(result);

    result = AUGraphInitialize(mGraph);
    checkErr(result);

    // Account for the latency between the two devices.
    ComputeThruOffset();

    return result;
}
// Start pulling audio from the input device and pushing it to the output
// device. Allocates the ring/work buffers, registers an IOProc on each
// device, and blocks until both procs report kRunning before computing the
// thru offset. No-op if already running or if either device is invalid.
void AudioThruEngine::Start()
{
    if (mRunning) return;
    if (!mInputDevice.Valid() || !mOutputDevice.Valid()) {
        //printf("invalid device\n");
        return;
    }

    // $$$ should do some checks on the format/sample rate matching
    // Try once to reconcile mismatched rates; bail out if that fails.
    if (mInputDevice.mFormat.mSampleRate != mOutputDevice.mFormat.mSampleRate) {
        if (MatchSampleRate(false)) {
            printf("Error - sample rate mismatch: %f / %f\n", mInputDevice.mFormat.mSampleRate, mOutputDevice.mFormat.mSampleRate);
            return;
        }
    }

    /*if (mInputDevice.mFormat.mChannelsPerFrame != mOutputDevice.mFormat.mChannelsPerFrame
    || mInputDevice.mFormat.mBytesPerFrame != mOutputDevice.mFormat.mBytesPerFrame)
    {
        printf("Error - format mismatch: %ld / %ld channels, %ld / %ld bytes per frame\n",
            mInputDevice.mFormat.mChannelsPerFrame, mOutputDevice.mFormat.mChannelsPerFrame,
            mInputDevice.mFormat.mBytesPerFrame, mOutputDevice.mFormat.mBytesPerFrame);
        return;
    }*/
    //mErrorMessage[0] = '\0';

    // Ring buffer sized for kSecondsInRingBuffer worth of input frames.
    mInputBuffer->Allocate(mInputDevice.mFormat.mBytesPerFrame, UInt32(kSecondsInRingBuffer * mInputDevice.mFormat.mSampleRate));
    mSampleRate = mInputDevice.mFormat.mSampleRate;

    // Scratch buffer for one input device I/O cycle, zero-filled.
    // NOTE(review): allocated with new[] on every Start; presumably the
    // matching delete[] lives in Stop — verify no leak on repeated starts.
    mWorkBuf = new Byte[mInputDevice.mBufferSizeFrames * mInputDevice.mFormat.mBytesPerFrame];
    memset(mWorkBuf, 0, mInputDevice.mBufferSizeFrames * mInputDevice.mFormat.mBytesPerFrame);

    // Set before registering the IOProcs so the callbacks see a running engine.
    mRunning = true;

#if USE_AUDIODEVICEREAD
    // Alternate input path: register a buffer list for AudioDeviceRead.
    // NOTE(review): `propsize` is not declared in this branch — it must come
    // from enclosing scope not visible here; confirm this still compiles
    // when USE_AUDIODEVICEREAD is enabled.
    UInt32 streamListSize;
    verify_noerr (AudioDeviceGetPropertyInfo(gInputDevice, 0, true, kAudioDevicePropertyStreams, &streamListSize, NULL));
    UInt32 nInputStreams = streamListSize / sizeof(AudioStreamID);

    propsize = offsetof(AudioBufferList, mBuffers[nInputStreams]);
    gInputIOBuffer = (AudioBufferList *)malloc(propsize);
    verify_noerr (AudioDeviceGetProperty(gInputDevice, 0, true, kAudioDevicePropertyStreamConfiguration, &propsize, gInputIOBuffer));
    gInputIOBuffer->mBuffers[0].mData = malloc(gInputIOBuffer->mBuffers[0].mDataByteSize);
    verify_noerr (AudioDeviceSetProperty(gInputDevice, NULL, 0, true, kAudioDevicePropertyRegisterBufferList, propsize, gInputIOBuffer));
#endif

    mInputProcState = kStarting;
    mOutputProcState = kStarting;

    // Input side first: register and start the capture IOProc.
    // NOTE(review): AudioDeviceAddIOProc/AudioDeviceStart are the legacy
    // (pre-AudioDeviceCreateIOProcID) HAL calls — confirm against the
    // project's minimum macOS target.
    verify_noerr (AudioDeviceAddIOProc(mInputDevice.mID, InputIOProc, this));
    verify_noerr (AudioDeviceStart(mInputDevice.mID, InputIOProc));

    // Pick the output callback by channel count, then start the output side.
    if (mInputDevice.CountChannels() == 2)
        mOutputIOProc = OutputIOProc;
    else
        mOutputIOProc = OutputIOProc16;

    verify_noerr (AudioDeviceAddIOProc(mOutputDevice.mID, mOutputIOProc, this));
    verify_noerr (AudioDeviceStart(mOutputDevice.mID, mOutputIOProc));

//	UInt32 propsize = sizeof(UInt32);
//	UInt32 isAlreadyRunning;
//	err = (AudioDeviceGetProperty(mOutputDevice.mID, 0, false, kAudioDevicePropertyDeviceIsRunning, &propsize, &isAlreadyRunning));
//	if (isAlreadyRunning)
//		mOutputProcState = kRunning;
//	else

    // Busy-wait (1 ms polls) until both IOProcs have flagged themselves
    // kRunning from their first callback.
    while (mInputProcState != kRunning || mOutputProcState != kRunning)
        usleep(1000);

//	usleep(12000);
    ComputeThruOffset();
}
// Set the additional latency (in frames) to insert between input and
// output. Takes effect immediately when the engine is running; otherwise
// the stored value is picked up by the next Start().
void AudioThruEngine::SetExtraLatency(SInt32 frames)
{
    mExtraLatencyFrames = frames;
    if (!mRunning)
        return;
    // Engine is live — recompute the thru offset with the new value now.
    ComputeThruOffset();
}