// Per-callback audio I/O: mixes procedural sounds into the output, measures
// microphone loudness, ships the mic frame (plus head pose / listen-mode
// metadata) to the audio mixer node, then plays back one network frame from
// the receive ring buffer (optionally applying a yaw-driven flange effect).
// Runs on the audio callback thread — must stay allocation-free on the hot path.
inline void Audio::performIO(int16_t* inputLeft, int16_t* outputLeft, int16_t* outputRight) {
    NodeList* nodeList = NodeList::getInstance();
    Application* interface = Application::getInstance();
    Avatar* interfaceAvatar = interface->getAvatar();

    // Start each callback from silence; everything below accumulates with +=.
    memset(outputLeft, 0, PACKET_LENGTH_BYTES_PER_CHANNEL);
    memset(outputRight, 0, PACKET_LENGTH_BYTES_PER_CHANNEL);

    // Add Procedural effects to input samples
    addProceduralSounds(inputLeft, outputLeft, outputRight, BUFFER_LENGTH_SAMPLES_PER_CHANNEL);

    if (nodeList && inputLeft) {
        // Measure the loudness of the signal from the microphone and store in audio object
        float loudness = 0;
        for (int i = 0; i < BUFFER_LENGTH_SAMPLES_PER_CHANNEL; i++) {
            loudness += abs(inputLeft[i]);
        }
        loudness /= BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
        _lastInputLoudness = loudness;

        // add input (@microphone) data to the scope
        _scope->addSamples(0, inputLeft, BUFFER_LENGTH_SAMPLES_PER_CHANNEL);

        Node* audioMixer = nodeList->soloNodeOfType(NODE_TYPE_AUDIO_MIXER);
        if (audioMixer) {
            // Copy the socket under the node lock so another thread can't
            // swap the active socket out from under us mid-read.
            audioMixer->lock();
            sockaddr_in audioSocket = *(sockaddr_in*) audioMixer->getActiveSocket();
            audioMixer->unlock();

            glm::vec3 headPosition = interfaceAvatar->getHeadJointPosition();
            glm::quat headOrientation = interfaceAvatar->getHead().getOrientation();

            // leadingBytes tracks everything that precedes the raw samples in
            // the packet: header + position + orientation, then each optional
            // field below adds itself as it is packed.
            int numBytesPacketHeader = numBytesForPacketHeader((unsigned char*) &PACKET_TYPE_MICROPHONE_AUDIO_NO_ECHO);
            int leadingBytes = numBytesPacketHeader + sizeof(headPosition) + sizeof(headOrientation);

            unsigned char dataPacket[MAX_PACKET_SIZE];

            // Packet type carries the echo preference so the mixer knows
            // whether to loop our own audio back to us.
            PACKET_TYPE packetType = Menu::getInstance()->isOptionChecked(MenuOption::EchoAudio)
                ? PACKET_TYPE_MICROPHONE_AUDIO_WITH_ECHO : PACKET_TYPE_MICROPHONE_AUDIO_NO_ECHO;

            unsigned char* currentPacketPtr = dataPacket + populateTypeAndVersion(dataPacket, packetType);

            // pack Source Data
            uint16_t ownerID = NodeList::getInstance()->getOwnerID();
            memcpy(currentPacketPtr, &ownerID, sizeof(ownerID));
            currentPacketPtr += (sizeof(ownerID));
            leadingBytes += (sizeof(ownerID));

            // pack Listen Mode Data
            memcpy(currentPacketPtr, &_listenMode, sizeof(_listenMode));
            currentPacketPtr += (sizeof(_listenMode));
            leadingBytes += (sizeof(_listenMode));

            if (_listenMode == AudioRingBuffer::OMNI_DIRECTIONAL_POINT) {
                // Point mode: a single radius scalar follows the mode byte.
                memcpy(currentPacketPtr, &_listenRadius, sizeof(_listenRadius));
                currentPacketPtr += (sizeof(_listenRadius));
                leadingBytes += (sizeof(_listenRadius));
            } else if (_listenMode == AudioRingBuffer::SELECTED_SOURCES) {
                // Selected-sources mode: a count, then each source id.
                int listenSourceCount = _listenSources.size();
                memcpy(currentPacketPtr, &listenSourceCount, sizeof(listenSourceCount));
                currentPacketPtr += (sizeof(listenSourceCount));
                leadingBytes += (sizeof(listenSourceCount));
                for (int i = 0; i < listenSourceCount; i++) {
                    memcpy(currentPacketPtr, &_listenSources[i], sizeof(_listenSources[i]));
                    currentPacketPtr += sizeof(_listenSources[i]);
                    leadingBytes += sizeof(_listenSources[i]);
                }
            }

            // memcpy the three float positions
            memcpy(currentPacketPtr, &headPosition, sizeof(headPosition));
            currentPacketPtr += (sizeof(headPosition));

            // memcpy our orientation
            memcpy(currentPacketPtr, &headOrientation, sizeof(headOrientation));
            currentPacketPtr += sizeof(headOrientation);

            // copy the audio data to the last BUFFER_LENGTH_BYTES bytes of the data packet
            memcpy(currentPacketPtr, inputLeft, BUFFER_LENGTH_BYTES_PER_CHANNEL);

            nodeList->getNodeSocket()->send((sockaddr*) &audioSocket, dataPacket,
                                            BUFFER_LENGTH_BYTES_PER_CHANNEL + leadingBytes);
            interface->getBandwidthMeter()->outputStream(BandwidthMeter::AUDIO)
                .updateValue(BUFFER_LENGTH_BYTES_PER_CHANNEL + leadingBytes);
        }
    }

    AudioRingBuffer* ringBuffer = &_ringBuffer;

    // if there is anything in the ring buffer, decide what to do:
    if (ringBuffer->getEndOfLastWrite()) {
        if (!ringBuffer->isStarted()
            && ringBuffer->diffLastWriteNextOutput() < (PACKET_LENGTH_SAMPLES
                + _jitterBufferSamples * (ringBuffer->isStereo() ? 2 : 1))) {
            //
            // If not enough audio has arrived to start playback, keep waiting
            //
#ifdef SHOW_AUDIO_DEBUG
            qDebug("%i,%i,%i,%i\n",
                   _packetsReceivedThisPlayback,
                   ringBuffer->diffLastWriteNextOutput(),
                   PACKET_LENGTH_SAMPLES,
                   _jitterBufferSamples);
#endif
        } else if (ringBuffer->isStarted() && ringBuffer->diffLastWriteNextOutput() == 0) {
            //
            // If we have started and now have run out of audio to send to the audio device,
            // this means we've starved and should restart.
            //
            ringBuffer->setStarted(false);
            _numStarves++;
            _packetsReceivedThisPlayback = 0;
            _wasStarved = 10;      // Frames for which to render the indication that the system was starved.
#ifdef SHOW_AUDIO_DEBUG
            qDebug("Starved, remaining samples = %d\n",
                   ringBuffer->diffLastWriteNextOutput());
#endif
        } else {
            //
            // We are either already playing back, or we have enough audio to start playing back.
            //
            if (!ringBuffer->isStarted()) {
                ringBuffer->setStarted(true);
#ifdef SHOW_AUDIO_DEBUG
                qDebug("starting playback %0.1f msecs delayed, jitter = %d, pkts recvd: %d \n",
                       (usecTimestampNow() - usecTimestamp(&_firstPacketReceivedTime))/1000.0,
                       _jitterBufferSamples,
                       _packetsReceivedThisPlayback);
#endif
            }

            //
            // play whatever we have in the audio buffer
            //
            // if we haven't fired off the flange effect, check if we should
            // TODO: lastMeasuredHeadYaw is now relative to body - check if this still works.
            int lastYawMeasured = fabsf(interfaceAvatar->getHeadYawRate());

            if (!_samplesLeftForFlange && lastYawMeasured > MIN_FLANGE_EFFECT_THRESHOLD) {
                // we should flange for one second
                // NOTE: the assignment-inside-comparison only arms the effect
                // when yaw is NOT a new maximum (i.e. yaw rate started falling).
                if ((_lastYawMeasuredMaximum = std::max(_lastYawMeasuredMaximum, lastYawMeasured)) != lastYawMeasured) {
                    _lastYawMeasuredMaximum = std::min(_lastYawMeasuredMaximum, MIN_FLANGE_EFFECT_THRESHOLD);

                    _samplesLeftForFlange = SAMPLE_RATE;

                    // Intensity scales linearly between the min/max thresholds;
                    // rate and weight derive from intensity.
                    _flangeIntensity = MIN_FLANGE_INTENSITY +
                        ((_lastYawMeasuredMaximum - MIN_FLANGE_EFFECT_THRESHOLD) /
                         (float)(MAX_FLANGE_EFFECT_THRESHOLD - MIN_FLANGE_EFFECT_THRESHOLD)) *
                        (1 - MIN_FLANGE_INTENSITY);

                    _flangeRate = FLANGE_BASE_RATE * _flangeIntensity;
                    _flangeWeight = MAX_FLANGE_SAMPLE_WEIGHT * _flangeIntensity;
                }
            }

            // Frame layout in the ring buffer is planar: left channel first,
            // right channel PACKET_LENGTH_SAMPLES_PER_CHANNEL later.
            for (int s = 0; s < PACKET_LENGTH_SAMPLES_PER_CHANNEL; s++) {
                int leftSample = ringBuffer->getNextOutput()[s];
                int rightSample = ringBuffer->getNextOutput()[s + PACKET_LENGTH_SAMPLES_PER_CHANNEL];

                if (_samplesLeftForFlange > 0) {
                    // Delay grows exponentially over the one-second effect window.
                    float exponent = (SAMPLE_RATE - _samplesLeftForFlange -
                                      (SAMPLE_RATE / _flangeRate)) / (SAMPLE_RATE / _flangeRate);
                    int sampleFlangeDelay = (SAMPLE_RATE / (1000 * _flangeIntensity)) * powf(2, exponent);

                    if (_samplesLeftForFlange != SAMPLE_RATE || s >= (SAMPLE_RATE / 2000)) {
                        // we have a delayed sample to add to this sample
                        int16_t *flangeFrame = ringBuffer->getNextOutput();
                        int flangeIndex = s - sampleFlangeDelay;

                        if (flangeIndex < 0) {
                            // we need to grab the flange sample from earlier in the buffer
                            // (previous frame, wrapping to the end of the ring if
                            // we're sitting at the buffer start)
                            flangeFrame = ringBuffer->getNextOutput() != ringBuffer->getBuffer()
                                ? ringBuffer->getNextOutput() - PACKET_LENGTH_SAMPLES
                                : ringBuffer->getNextOutput() + RING_BUFFER_LENGTH_SAMPLES - PACKET_LENGTH_SAMPLES;

                            flangeIndex = PACKET_LENGTH_SAMPLES_PER_CHANNEL + (s - sampleFlangeDelay);
                        }

                        int16_t leftFlangeSample = flangeFrame[flangeIndex];
                        int16_t rightFlangeSample = flangeFrame[flangeIndex + PACKET_LENGTH_SAMPLES_PER_CHANNEL];

                        // Crossfade current sample with the delayed one.
                        leftSample = (1 - _flangeWeight) * leftSample + (_flangeWeight * leftFlangeSample);
                        rightSample = (1 - _flangeWeight) * rightSample + (_flangeWeight * rightFlangeSample);

                        _samplesLeftForFlange--;

                        if (_samplesLeftForFlange == 0) {
                            _lastYawMeasuredMaximum = 0;
                        }
                    }
                }
#ifndef TEST_AUDIO_LOOPBACK
                outputLeft[s] += leftSample;
                outputRight[s] += rightSample;
#else
                outputLeft[s] += inputLeft[s];
                outputRight[s] += inputLeft[s];
#endif
            }

            // Advance the read pointer one network frame and wrap at ring end.
            ringBuffer->setNextOutput(ringBuffer->getNextOutput() + PACKET_LENGTH_SAMPLES);

            if (ringBuffer->getNextOutput() == ringBuffer->getBuffer() + RING_BUFFER_LENGTH_SAMPLES) {
                ringBuffer->setNextOutput(ringBuffer->getBuffer());
            }
        }
    }

    eventuallySendRecvPing(inputLeft, outputLeft, outputRight);

    // add output (@speakers) data just written to the scope
    _scope->addSamples(1, outputLeft, BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
    _scope->addSamples(2, outputRight, BUFFER_LENGTH_SAMPLES_PER_CHANNEL);

    gettimeofday(&_lastCallbackTime, NULL);
}
// PortAudio stream callback (legacy path): measures mic loudness, optionally
// mirrors input into the scope, sends the mic frame to the mixer as an
// INJECT_AUDIO packet (position + attenuation byte + bearing + samples), and
// plays one frame from the receive ring buffer with an optional flange effect.
// Returns paContinue so the stream keeps running.
int audioCallback (const void *inputBuffer,
                   void *outputBuffer,
                   unsigned long frames,
                   const PaStreamCallbackTimeInfo *timeInfo,
                   PaStreamCallbackFlags statusFlags,
                   void *userData) {
    AudioData *data = (AudioData *) userData;

    // Non-interleaved stream: inputBuffer is an array of per-channel pointers.
    int16_t *inputLeft = ((int16_t **) inputBuffer)[0];
    // int16_t *inputRight = ((int16_t **) inputBuffer)[1];

    //printf("Audio callback at %6.0f\n", usecTimestampNow()/1000);

    if (inputLeft != NULL) {
        //
        // Measure the loudness of the signal from the microphone and store in audio object
        //
        float loudness = 0;
        for (int i = 0; i < BUFFER_LENGTH_SAMPLES; i++) {
            loudness += abs(inputLeft[i]);
        }
        loudness /= BUFFER_LENGTH_SAMPLES;
        data->lastInputLoudness = loudness;
        // Exponential smoothing of loudness across callbacks.
        data->averagedInputLoudness = 0.66*data->averagedInputLoudness + 0.33*loudness;

        //
        // If scope is turned on, copy input buffer to scope
        //
        if (scope->getState()) {
            for (int i = 0; i < BUFFER_LENGTH_SAMPLES; i++) {
                scope->addData((float)inputLeft[i]/32767.0, 1, i);
            }
        }

        if (data->mixerAddress != 0) {
            sockaddr_in audioMixerSocket;
            audioMixerSocket.sin_family = AF_INET;
            audioMixerSocket.sin_addr.s_addr = data->mixerAddress;
            audioMixerSocket.sin_port = data->mixerPort;

            int leadingBytes = 2 + (sizeof(float) * 4);

            // we need the amount of bytes in the buffer + 1 for type
            // + 12 for 3 floats for position + float for bearing + 1 attenuation byte
            unsigned char dataPacket[BUFFER_LENGTH_BYTES + leadingBytes];

            dataPacket[0] = PACKET_HEADER_INJECT_AUDIO;
            unsigned char *currentPacketPtr = dataPacket + 1;

            // memcpy the three float positions
            for (int p = 0; p < 3; p++) {
                memcpy(currentPacketPtr, &data->linkedHead->getPos()[p], sizeof(float));
                currentPacketPtr += sizeof(float);
            }

            // tell the mixer not to add additional attenuation to our source
            *(currentPacketPtr++) = 255;

            // memcpy the corrected render yaw, normalized into (-180, 180]
            float correctedYaw = fmodf(data->linkedHead->getRenderYaw(), 360);

            if (correctedYaw > 180) {
                correctedYaw -= 360;
            } else if (correctedYaw < -180) {
                correctedYaw += 360;
            }

            // Loopback requests are signalled by pushing yaw out of the normal
            // range with AGENT_LOOPBACK_MODIFIER.
            if (data->mixerLoopbackFlag) {
                correctedYaw = correctedYaw > 0
                    ? correctedYaw + AGENT_LOOPBACK_MODIFIER
                    : correctedYaw - AGENT_LOOPBACK_MODIFIER;
            }

            memcpy(currentPacketPtr, &correctedYaw, sizeof(float));
            currentPacketPtr += sizeof(float);

            // Historical walking-sound mixing, kept for reference:
            // if (samplesLeftForWalk == 0) {
            //     sampleWalkPointer = walkingSoundArray;
            // }
            //
            // if (data->playWalkSound) {
            //     // if this boolean is true and we aren't currently playing the walk sound
            //     // set the number of samples left for walk
            //     samplesLeftForWalk = walkingSoundSamples;
            //     data->playWalkSound = false;
            // }
            //
            // if (samplesLeftForWalk > 0) {
            //     // we need to play part of the walking sound
            //     // so add it in
            //     int affectedSamples = std::min(samplesLeftForWalk, BUFFER_LENGTH_SAMPLES);
            //     for (int i = 0; i < affectedSamples; i++) {
            //         inputLeft[i] += *sampleWalkPointer;
            //         inputLeft[i] = std::max(inputLeft[i], std::numeric_limits<int16_t>::min());
            //         inputLeft[i] = std::min(inputLeft[i], std::numeric_limits<int16_t>::max());
            //
            //         sampleWalkPointer++;
            //         samplesLeftForWalk--;
            //
            //         if (sampleWalkPointer - walkingSoundArray > walkingSoundSamples) {
            //             sampleWalkPointer = walkingSoundArray;
            //         };
            //     }
            // }
            //

            // copy the audio data to the last BUFFER_LENGTH_BYTES bytes of the data packet
            memcpy(currentPacketPtr, inputLeft, BUFFER_LENGTH_BYTES);

            data->audioSocket->send((sockaddr *)&audioMixerSocket, dataPacket,
                                    BUFFER_LENGTH_BYTES + leadingBytes);
        }
    }

    int16_t *outputLeft = ((int16_t **) outputBuffer)[0];
    int16_t *outputRight = ((int16_t **) outputBuffer)[1];

    // Default to silence; only overwritten when playback is active below.
    memset(outputLeft, 0, PACKET_LENGTH_BYTES_PER_CHANNEL);
    memset(outputRight, 0, PACKET_LENGTH_BYTES_PER_CHANNEL);

    AudioRingBuffer *ringBuffer = data->ringBuffer;

    // if we've been reset, and there isn't any new packets yet
    // just play some silence
    if (ringBuffer->getEndOfLastWrite() != NULL) {
        if (!ringBuffer->isStarted()
            && ringBuffer->diffLastWriteNextOutput() < PACKET_LENGTH_SAMPLES + JITTER_BUFFER_SAMPLES) {
            //printf("Held back, buffer has %d of %d samples required.\n",
            //       ringBuffer->diffLastWriteNextOutput(), PACKET_LENGTH_SAMPLES + JITTER_BUFFER_SAMPLES);
        } else if (ringBuffer->diffLastWriteNextOutput() < PACKET_LENGTH_SAMPLES) {
            // Less than a full frame available: we starved — stop and rebuffer.
            ringBuffer->setStarted(false);

            starve_counter++;
            packetsReceivedThisPlayback = 0;

            //printf("Starved #%d\n", starve_counter);
            data->wasStarved = 10;      // Frames to render the indication that the system was starved.
        } else {
            if (!ringBuffer->isStarted()) {
                ringBuffer->setStarted(true);
                printf("starting playback %3.1f msecs delayed \n",
                       (usecTimestampNow() - usecTimestamp(&firstPlaybackTimer))/1000.0);
            } else {
                //printf("pushing buffer\n");
            }

            // play whatever we have in the audio buffer

            // if we haven't fired off the flange effect, check if we should
            int lastYawMeasured = fabsf(data->linkedHead->getLastMeasuredYaw());

            if (!samplesLeftForFlange && lastYawMeasured > MIN_FLANGE_EFFECT_THRESHOLD) {
                // we should flange for one second
                // (assignment inside the comparison: only triggers once yaw
                // stops making new maximums)
                if ((lastYawMeasuredMaximum = std::max(lastYawMeasuredMaximum, lastYawMeasured)) != lastYawMeasured) {
                    lastYawMeasuredMaximum = std::min(lastYawMeasuredMaximum, MIN_FLANGE_EFFECT_THRESHOLD);

                    samplesLeftForFlange = SAMPLE_RATE;

                    flangeIntensity = MIN_FLANGE_INTENSITY +
                        ((lastYawMeasuredMaximum - MIN_FLANGE_EFFECT_THRESHOLD) /
                         (float)(MAX_FLANGE_EFFECT_THRESHOLD - MIN_FLANGE_EFFECT_THRESHOLD)) *
                        (1 - MIN_FLANGE_INTENSITY);

                    flangeRate = FLANGE_BASE_RATE * flangeIntensity;
                    flangeWeight = MAX_FLANGE_SAMPLE_WEIGHT * flangeIntensity;
                }
            }

            // Disabled experiment: drop accumulated latency by skipping ahead
            // when the buffer holds more than the jitter threshold.
            // check if we have more than we need to play out
            // int thresholdFrames = ceilf((PACKET_LENGTH_SAMPLES + JITTER_BUFFER_SAMPLES) / (float)PACKET_LENGTH_SAMPLES);
            // int thresholdSamples = thresholdFrames * PACKET_LENGTH_SAMPLES;
            //
            // if (ringBuffer->diffLastWriteNextOutput() > thresholdSamples) {
            //     // we need to push the next output forwards
            //     int samplesToPush = ringBuffer->diffLastWriteNextOutput() - thresholdSamples;
            //
            //     if (ringBuffer->getNextOutput() + samplesToPush > ringBuffer->getBuffer()) {
            //         ringBuffer->setNextOutput(ringBuffer->getBuffer() +
            //             (samplesToPush - (ringBuffer->getBuffer() + RING_BUFFER_SAMPLES - ringBuffer->getNextOutput())));
            //     } else {
            //         ringBuffer->setNextOutput(ringBuffer->getNextOutput() + samplesToPush);
            //     }
            // }

            // Planar frame: left channel first, right channel half a frame later.
            for (int s = 0; s < PACKET_LENGTH_SAMPLES_PER_CHANNEL; s++) {
                int leftSample = ringBuffer->getNextOutput()[s];
                int rightSample = ringBuffer->getNextOutput()[s + PACKET_LENGTH_SAMPLES_PER_CHANNEL];

                if (samplesLeftForFlange > 0) {
                    // Delay grows exponentially across the one-second window.
                    float exponent = (SAMPLE_RATE - samplesLeftForFlange -
                                      (SAMPLE_RATE / flangeRate)) / (SAMPLE_RATE / flangeRate);
                    int sampleFlangeDelay = (SAMPLE_RATE / (1000 * flangeIntensity)) * powf(2, exponent);

                    if (samplesLeftForFlange != SAMPLE_RATE || s >= (SAMPLE_RATE / 2000)) {
                        // we have a delayed sample to add to this sample
                        int16_t *flangeFrame = ringBuffer->getNextOutput();
                        int flangeIndex = s - sampleFlangeDelay;

                        if (flangeIndex < 0) {
                            // we need to grab the flange sample from earlier in the buffer
                            // (previous frame, wrapping to the ring end if at start)
                            flangeFrame = ringBuffer->getNextOutput() != ringBuffer->getBuffer()
                                ? ringBuffer->getNextOutput() - PACKET_LENGTH_SAMPLES
                                : ringBuffer->getNextOutput() + RING_BUFFER_SAMPLES - PACKET_LENGTH_SAMPLES;

                            flangeIndex = PACKET_LENGTH_SAMPLES_PER_CHANNEL + (s - sampleFlangeDelay);
                        }

                        int16_t leftFlangeSample = flangeFrame[flangeIndex];
                        int16_t rightFlangeSample = flangeFrame[flangeIndex + PACKET_LENGTH_SAMPLES_PER_CHANNEL];

                        // Crossfade current sample with the delayed one.
                        leftSample = (1 - flangeWeight) * leftSample + (flangeWeight * leftFlangeSample);
                        rightSample = (1 - flangeWeight) * rightSample + (flangeWeight * rightFlangeSample);

                        samplesLeftForFlange--;

                        if (samplesLeftForFlange == 0) {
                            lastYawMeasuredMaximum = 0;
                        }
                    }
                }

                outputLeft[s] = leftSample;
                outputRight[s] = rightSample;
            }

            // Advance the read pointer one network frame and wrap at ring end.
            ringBuffer->setNextOutput(ringBuffer->getNextOutput() + PACKET_LENGTH_SAMPLES);

            if (ringBuffer->getNextOutput() == ringBuffer->getBuffer() + RING_BUFFER_SAMPLES) {
                ringBuffer->setNextOutput(ringBuffer->getBuffer());
            }
        }
    }

    gettimeofday(&data->lastCallback, NULL);

    return paContinue;
}
void *receiveAudioViaUDP(void *args) { AudioRecThreadStruct *threadArgs = (AudioRecThreadStruct *) args; AudioData *sharedAudioData = threadArgs->sharedAudioData; int16_t *receivedData = new int16_t[PACKET_LENGTH_SAMPLES]; ssize_t receivedBytes; // Init Jitter timer values timeval previousReceiveTime, currentReceiveTime = {}; gettimeofday(&previousReceiveTime, NULL); gettimeofday(¤tReceiveTime, NULL); int totalPacketsReceived = 0; stdev.reset(); if (LOG_SAMPLE_DELAY) { char *directory = new char[50]; char *filename = new char[50]; sprintf(directory, "%s/Desktop/echo_tests", getenv("HOME")); mkdir(directory, S_IRWXU | S_IRWXG | S_IRWXO); sprintf(filename, "%s/%ld.csv", directory, previousReceiveTime.tv_sec); logFile.open(filename, std::ios::out); delete[] directory; delete[] filename; } while (!stopAudioReceiveThread) { if (sharedAudioData->audioSocket->receive((void *)receivedData, &receivedBytes)) { gettimeofday(¤tReceiveTime, NULL); totalPacketsReceived++; if (LOG_SAMPLE_DELAY) { // write time difference (in microseconds) between packet receipts to file double timeDiff = diffclock(&previousReceiveTime, ¤tReceiveTime); logFile << timeDiff << std::endl; } double tDiff = diffclock(&previousReceiveTime, ¤tReceiveTime); //printf("tDiff %4.1f\n", tDiff); // Discard first few received packets for computing jitter (often they pile up on start) if (totalPacketsReceived > 3) stdev.addValue(tDiff); if (stdev.getSamples() > 500) { sharedAudioData->measuredJitter = stdev.getStDev(); //printf("Avg: %4.2f, Stdev: %4.2f\n", stdev.getAverage(), sharedAudioData->measuredJitter); stdev.reset(); } AudioRingBuffer *ringBuffer = sharedAudioData->ringBuffer; if (!ringBuffer->isStarted()) { packetsReceivedThisPlayback++; } else { //printf("Audio packet received at %6.0f\n", usecTimestampNow()/1000); } if (packetsReceivedThisPlayback == 1) gettimeofday(&firstPlaybackTimer, NULL); ringBuffer->parseData(receivedData, PACKET_LENGTH_BYTES); previousReceiveTime = currentReceiveTime; } } 
pthread_exit(0); }
//Allocate Audio Buffer List(s) to hold the data from input.
// Queries the input/output AUHAL stream formats, negotiates a common channel
// count and per-device sample rates across the AU graph (input -> varispeed
// -> output), then allocates one non-interleaved AudioBufferList for input
// plus the ring buffer that shuttles data between the two devices.
// NOTE(review): 'err' from the AudioUnitGetProperty calls is overwritten
// without being checked — only the Set calls go through checkErr; confirm
// whether the Get failures were meant to be fatal.
OSStatus CAPlayThrough::SetupBuffers() {
    OSStatus err = noErr;
    UInt32 bufferSizeFrames,bufferSizeBytes,propsize;

    CAStreamBasicDescription asbd,asbd_dev1_in,asbd_dev2_out;
    Float64 rate=0;

    //Get the size of the IO buffer(s)
    UInt32 propertySize = sizeof(bufferSizeFrames);
    err = AudioUnitGetProperty(mInputUnit, kAudioDevicePropertyBufferFrameSize,
                               kAudioUnitScope_Global, 0, &bufferSizeFrames, &propertySize);
    // One non-interleaved channel of Float32 per buffer.
    bufferSizeBytes = bufferSizeFrames * sizeof(Float32);

    //Get the Stream Format (Output client side)
    propertySize = sizeof(asbd_dev1_in);
    err = AudioUnitGetProperty(mInputUnit, kAudioUnitProperty_StreamFormat,
                               kAudioUnitScope_Input, 1, &asbd_dev1_in, &propertySize);
    //printf("=====Input DEVICE stream format\n" );
    //asbd_dev1_in.Print();

    //Get the Stream Format (client side)
    propertySize = sizeof(asbd);
    err = AudioUnitGetProperty(mInputUnit, kAudioUnitProperty_StreamFormat,
                               kAudioUnitScope_Output, 1, &asbd, &propertySize);
    //printf("=====current Input (Client) stream format\n");
    //asbd.Print();

    //Get the Stream Format (Output client side)
    propertySize = sizeof(asbd_dev2_out);
    err = AudioUnitGetProperty(mOutputUnit, kAudioUnitProperty_StreamFormat,
                               kAudioUnitScope_Output, 0, &asbd_dev2_out, &propertySize);
    //printf("=====Output (Device) stream format\n");
    //asbd_dev2_out.Print();

    //////////////////////////////////////
    //Set the format of all the AUs to the input/output devices channel count
    //For a simple case, you want to set this to the lower of count of the channels
    //in the input device vs output device
    //////////////////////////////////////
    asbd.mChannelsPerFrame =((asbd_dev1_in.mChannelsPerFrame < asbd_dev2_out.mChannelsPerFrame)
                             ?asbd_dev1_in.mChannelsPerFrame :asbd_dev2_out.mChannelsPerFrame) ;
    //printf("Info: Input Device channel count=%ld\t Input Device channel count=%ld\n",asbd_dev1_in.mChannelsPerFrame,asbd_dev2_out.mChannelsPerFrame);
    //printf("Info: CAPlayThrough will use %ld channels\n",asbd.mChannelsPerFrame);

    // We must get the sample rate of the input device and set it to the stream format of AUHAL
    propertySize = sizeof(Float64);
    AudioDeviceGetProperty(mInputDevice.mID, 0, 1,
                           kAudioDevicePropertyNominalSampleRate, &propertySize, &rate);
    asbd.mSampleRate =rate;
    propertySize = sizeof(asbd);

    //Set the new formats to the AUs...
    err = AudioUnitSetProperty(mInputUnit, kAudioUnitProperty_StreamFormat,
                               kAudioUnitScope_Output, 1, &asbd, propertySize);
    checkErr(err);
    err = AudioUnitSetProperty(mVarispeedUnit, kAudioUnitProperty_StreamFormat,
                               kAudioUnitScope_Input, 0, &asbd, propertySize);
    checkErr(err);

    //Set the correct sample rate for the output device, but keep the channel count the same
    propertySize = sizeof(Float64);
    AudioDeviceGetProperty(mOutputDevice.mID, 0, 0,
                           kAudioDevicePropertyNominalSampleRate, &propertySize, &rate);
    asbd.mSampleRate =rate;
    propertySize = sizeof(asbd);

    //Set the new audio stream formats for the rest of the AUs...
    err = AudioUnitSetProperty(mVarispeedUnit, kAudioUnitProperty_StreamFormat,
                               kAudioUnitScope_Output, 0, &asbd, propertySize);
    checkErr(err);
    err = AudioUnitSetProperty(mOutputUnit, kAudioUnitProperty_StreamFormat,
                               kAudioUnitScope_Input, 0, &asbd, propertySize);
    checkErr(err);

    //calculate number of buffers from channels
    // AudioBufferList is a variable-length struct: header + one AudioBuffer
    // per (non-interleaved) channel.
    propsize = offsetof(AudioBufferList, mBuffers[0]) +
               (sizeof(AudioBuffer) *asbd.mChannelsPerFrame);

    //malloc buffer lists
    mInputBuffer = (AudioBufferList *)malloc(propsize);
    mInputBuffer->mNumberBuffers = asbd.mChannelsPerFrame;

    //pre-malloc buffers for AudioBufferLists
    for(UInt32 i =0; i< mInputBuffer->mNumberBuffers ; i++) {
        mInputBuffer->mBuffers[i].mNumberChannels = 1;
        mInputBuffer->mBuffers[i].mDataByteSize = bufferSizeBytes;
        mInputBuffer->mBuffers[i].mData = malloc(bufferSizeBytes);
    }

    //Alloc ring buffer that will hold data between the two audio devices
    // Sized at 20x the IO buffer to absorb rate drift between devices.
    mBuffer = new AudioRingBuffer();
    mBuffer->Allocate(asbd.mChannelsPerFrame, asbd.mBytesPerFrame, bufferSizeFrames * 20);

    return err;
}