void sendEnvironmentPacket(const SharedNodePointer& node, AudioMixerClientData& data) {
    bool hasReverb = false;
    float reverbTime, wetLevel;

    auto& reverbSettings = AudioMixer::getReverbSettings();
    auto& audioZones = AudioMixer::getAudioZones();

    AvatarAudioStream* stream = data.getAvatarAudioStream();
    glm::vec3 streamPosition = stream->getPosition();

    // find reverb properties
    for (int i = 0; i < reverbSettings.size(); ++i) {
        AABox box = audioZones[reverbSettings[i].zone];
        if (box.contains(streamPosition)) {
            hasReverb = true;
            reverbTime = reverbSettings[i].reverbTime;
            wetLevel = reverbSettings[i].wetLevel;
            break;
        }
    }

    // check if data changed
    bool dataChanged = (stream->hasReverb() != hasReverb) ||
        (stream->hasReverb() && (stream->getReverbTime() != reverbTime || stream->getWetLevel() != wetLevel));
    if (dataChanged) {
        // update stream
        if (hasReverb) {
            stream->setReverb(reverbTime, wetLevel);
        } else {
            stream->clearReverb();
        }
    }

    // send packet on change, or every so often at random
    const float CHANCE_OF_SEND = 0.01f;
    bool sendData = dataChanged || (randFloat() < CHANCE_OF_SEND);

    if (sendData) {
        // size the packet
        unsigned char bitset = 0;
        int packetSize = sizeof(bitset);
        if (hasReverb) {
            packetSize += sizeof(reverbTime) + sizeof(wetLevel);
        }

        // write the packet
        auto envPacket = NLPacket::create(PacketType::AudioEnvironment, packetSize);
        if (hasReverb) {
            setAtBit(bitset, HAS_REVERB_BIT);
        }
        envPacket->writePrimitive(bitset);
        if (hasReverb) {
            envPacket->writePrimitive(reverbTime);
            envPacket->writePrimitive(wetLevel);
        }

        // send the packet
        DependencyManager::get<NodeList>()->sendPacket(std::move(envPacket), *node);
    }
}
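// A minimal sketch (not mixer code) of how a receiver could decode the AudioEnvironment
// payload written above: one bitset byte, then two packed floats when the reverb bit is
// set. This assumes writePrimitive packs the raw bytes of each value in order; the real
// HAS_REVERB_BIT index is defined elsewhere in the codebase, so the value below is an
// assumption for illustration only.
#include <cstring>

const int HAS_REVERB_BIT_DEMO = 0;   // assumed bit index, not the codebase's constant

struct EnvironmentInfo {
    bool hasReverb = false;
    float reverbTime = 0.0f;
    float wetLevel = 0.0f;
};

EnvironmentInfo decodeEnvironmentPayload(const unsigned char* payload, int size) {
    EnvironmentInfo info;
    if (size < 1) {
        return info;   // too short to hold the bitset byte
    }
    unsigned char bitset = payload[0];
    info.hasReverb = (bitset & (1 << HAS_REVERB_BIT_DEMO)) != 0;
    if (info.hasReverb && size >= (int)(1 + 2 * sizeof(float))) {
        memcpy(&info.reverbTime, payload + 1, sizeof(float));
        memcpy(&info.wetLevel, payload + 1 + sizeof(float), sizeof(float));
    }
    return info;
}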
float computeGain(const AudioMixerClientData& listenerNodeData, const AvatarAudioStream& listeningNodeStream,
                  const PositionalAudioStream& streamToAdd, const glm::vec3& relativePosition, bool isEcho) {
    float gain = 1.0f;

    // injector: apply attenuation
    if (streamToAdd.getType() == PositionalAudioStream::Injector) {
        gain *= reinterpret_cast<const InjectedAudioStream*>(&streamToAdd)->getAttenuationRatio();

    // avatar: apply fixed off-axis attenuation to make them quieter as they turn away
    } else if (!isEcho && (streamToAdd.getType() == PositionalAudioStream::Microphone)) {
        glm::vec3 rotatedListenerPosition = glm::inverse(streamToAdd.getOrientation()) * relativePosition;

        // source directivity is based on angle of emission, in local coordinates
        glm::vec3 direction = glm::normalize(rotatedListenerPosition);
        float angleOfDelivery = fastAcosf(glm::clamp(-direction.z, -1.0f, 1.0f));   // UNIT_NEG_Z is "forward"

        const float MAX_OFF_AXIS_ATTENUATION = 0.2f;
        const float OFF_AXIS_ATTENUATION_STEP = (1 - MAX_OFF_AXIS_ATTENUATION) / 2.0f;
        float offAxisCoefficient = MAX_OFF_AXIS_ATTENUATION +
            (angleOfDelivery * (OFF_AXIS_ATTENUATION_STEP / PI_OVER_TWO));

        gain *= offAxisCoefficient;

        // apply master gain, only to avatars
        gain *= listenerNodeData.getMasterAvatarGain();
    }

    auto& audioZones = AudioMixer::getAudioZones();
    auto& zoneSettings = AudioMixer::getZoneSettings();

    // find distance attenuation coefficient
    float attenuationPerDoublingInDistance = AudioMixer::getAttenuationPerDoublingInDistance();
    for (int i = 0; i < zoneSettings.length(); ++i) {
        if (audioZones[zoneSettings[i].source].contains(streamToAdd.getPosition()) &&
            audioZones[zoneSettings[i].listener].contains(listeningNodeStream.getPosition())) {
            attenuationPerDoublingInDistance = zoneSettings[i].coefficient;
            break;
        }
    }

    // distance attenuation
    const float ATTENUATION_START_DISTANCE = 1.0f;
    float distance = glm::length(relativePosition);
    assert(ATTENUATION_START_DISTANCE > EPSILON);
    if (distance >= ATTENUATION_START_DISTANCE) {
        // translate the zone setting to gain per log2(distance)
        float g = 1.0f - attenuationPerDoublingInDistance;
        g = glm::clamp(g, EPSILON, 1.0f);

        // calculate the distance coefficient using the distance to this node
        float distanceCoefficient = fastExp2f(fastLog2f(g) * fastLog2f(distance / ATTENUATION_START_DISTANCE));

        // multiply the current attenuation coefficient by the distance coefficient
        gain *= distanceCoefficient;
    }

    return gain;
}
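// The distance coefficient above, exp2(log2(g) * log2(d / d0)), is algebraically
// g^(log2(d / d0)): the gain is multiplied by g once per doubling of distance past d0.
// A standalone numeric check of that identity, using only <cmath> (no mixer types):
#include <cmath>
#include <cstdio>

int main() {
    const float g = 0.5f;    // e.g. attenuationPerDoublingInDistance = 0.5
    const float d0 = 1.0f;   // ATTENUATION_START_DISTANCE
    const float distances[] = { 1.0f, 2.0f, 4.0f, 8.0f };
    for (float d : distances) {
        float coefficient = exp2f(log2f(g) * log2f(d / d0));
        printf("d = %.0f -> coefficient = %.3f\n", d, coefficient);   // 1.000, 0.500, 0.250, 0.125
    }
    return 0;
}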
float AudioMixer::gainForSource(const PositionalAudioStream& streamToAdd, const AvatarAudioStream& listeningNodeStream,
                                const glm::vec3& relativePosition, bool isEcho) {
    float gain = 1.0f;

    float distanceBetween = glm::length(relativePosition);
    if (distanceBetween < EPSILON) {
        distanceBetween = EPSILON;
    }

    if (streamToAdd.getType() == PositionalAudioStream::Injector) {
        gain *= reinterpret_cast<const InjectedAudioStream*>(&streamToAdd)->getAttenuationRatio();
    }

    if (!isEcho && (streamToAdd.getType() == PositionalAudioStream::Microphone)) {
        // source is another avatar, apply fixed off-axis attenuation to make them quieter as they turn away from listener
        glm::vec3 rotatedListenerPosition = glm::inverse(streamToAdd.getOrientation()) * relativePosition;

        float angleOfDelivery = glm::angle(glm::vec3(0.0f, 0.0f, -1.0f), glm::normalize(rotatedListenerPosition));

        const float MAX_OFF_AXIS_ATTENUATION = 0.2f;
        const float OFF_AXIS_ATTENUATION_FORMULA_STEP = (1 - MAX_OFF_AXIS_ATTENUATION) / 2.0f;
        float offAxisCoefficient = MAX_OFF_AXIS_ATTENUATION +
            (OFF_AXIS_ATTENUATION_FORMULA_STEP * (angleOfDelivery / PI_OVER_TWO));

        // multiply the current attenuation coefficient by the calculated off-axis coefficient
        gain *= offAxisCoefficient;
    }

    float attenuationPerDoublingInDistance = _attenuationPerDoublingInDistance;
    for (int i = 0; i < _zonesSettings.length(); ++i) {
        if (_audioZones[_zonesSettings[i].source].contains(streamToAdd.getPosition()) &&
            _audioZones[_zonesSettings[i].listener].contains(listeningNodeStream.getPosition())) {
            attenuationPerDoublingInDistance = _zonesSettings[i].coefficient;
            break;
        }
    }

    if (distanceBetween >= ATTENUATION_BEGINS_AT_DISTANCE) {
        // translate the zone setting to gain per log2(distance)
        float g = 1.0f - attenuationPerDoublingInDistance;
        g = (g < EPSILON) ? EPSILON : g;
        g = (g > 1.0f) ? 1.0f : g;

        // calculate the distance coefficient using the distance to this node
        float distanceCoefficient = exp2f(log2f(g) * log2f(distanceBetween / ATTENUATION_BEGINS_AT_DISTANCE));

        // multiply the current attenuation coefficient by the distance coefficient
        gain *= distanceCoefficient;
    }

    return gain;
}
float approximateGain(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd) {
    float gain = 1.0f;

    // injector: apply attenuation
    if (streamToAdd.getType() == PositionalAudioStream::Injector) {
        gain *= reinterpret_cast<const InjectedAudioStream*>(&streamToAdd)->getAttenuationRatio();
    }
    // avatar: skip off-axis attenuation - it is too costly to approximate

    // distance attenuation: approximate, ignore zone-specific attenuations
    glm::vec3 relativePosition = streamToAdd.getPosition() - listeningNodeStream.getPosition();
    float distance = glm::length(relativePosition);
    return gain / distance;

    // avatar: skip master gain - it is constant for all streams
}
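// The 1/distance shortcut above is not arbitrary: with a start distance of 1 it is
// exactly the logarithmic curve from computeGain at a per-doubling coefficient of 0.5,
// since exp2(log2(0.5) * log2(d)) = 2^(-log2 d) = 1/d. A standalone check:
#include <cmath>
#include <cstdio>

int main() {
    const float distances[] = { 1.0f, 2.0f, 4.0f, 8.0f };
    for (float d : distances) {
        float approx = 1.0f / d;                       // approximateGain's shortcut
        float exact = exp2f(log2f(0.5f) * log2f(d));   // zone-aware curve at g = 0.5, d0 = 1
        printf("d = %.0f: approx = %.3f, exact = %.3f\n", d, approx, exact);   // identical
    }
    return 0;
}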
void AudioMixerSlave::updateHRTFParameters(AudioMixerClientData::MixableStream& mixableStream,
                                           AvatarAudioStream& listeningNodeStream,
                                           float masterListenerGain) {
    auto streamToAdd = mixableStream.positionalStream;

    // check if this is a server echo of a source back to itself
    bool isEcho = (streamToAdd == &listeningNodeStream);

    glm::vec3 relativePosition = streamToAdd->getPosition() - listeningNodeStream.getPosition();
    float distance = glm::max(glm::length(relativePosition), EPSILON);

    float gain = computeGain(masterListenerGain, listeningNodeStream, *streamToAdd, relativePosition, distance, isEcho);
    float azimuth = isEcho ? 0.0f : computeAzimuth(listeningNodeStream, listeningNodeStream, relativePosition);

    mixableStream.hrtf->setParameterHistory(azimuth, distance, gain);

    ++stats.hrtfUpdates;
}
void AudioMixerSlave::addStream(AudioMixerClientData& listenerNodeData, const QUuid& sourceNodeID,
                                const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd,
                                bool throttle) {
    ++stats.totalMixes;

    // to reduce artifacts we call the HRTF functor for every source, even if throttled or silent
    // this ensures the correct tail from the last mixed block and the correct spatialization of the next first block

    // check if this is a server echo of a source back to itself
    bool isEcho = (&streamToAdd == &listeningNodeStream);

    glm::vec3 relativePosition = streamToAdd.getPosition() - listeningNodeStream.getPosition();
    float distance = glm::max(glm::length(relativePosition), EPSILON);
    float gain = computeGain(listenerNodeData, listeningNodeStream, streamToAdd, relativePosition, isEcho);
    float azimuth = isEcho ? 0.0f : computeAzimuth(listeningNodeStream, listeningNodeStream, relativePosition);

    const int HRTF_DATASET_INDEX = 1;

    if (!streamToAdd.lastPopSucceeded()) {
        bool forceSilentBlock = true;

        if (!streamToAdd.getLastPopOutput().isNull()) {
            bool isInjector = dynamic_cast<const InjectedAudioStream*>(&streamToAdd);

            // in an injector, just go silent - the injector has likely ended
            // in other inputs (microphone, &c.), repeat with fade to avoid the harsh jump to silence
            if (!isInjector) {
                // calculate its fade factor, which depends on how many times it's already been repeated
                float fadeFactor = calculateRepeatedFrameFadeFactor(streamToAdd.getConsecutiveNotMixedCount() - 1);
                if (fadeFactor > 0.0f) {
                    // apply the fadeFactor to the gain
                    gain *= fadeFactor;
                    forceSilentBlock = false;
                }
            }
        }

        if (forceSilentBlock) {
            // call renderSilent with a forced silent block to reduce artifacts
            // (this is not done for stereo streams since they do not go through the HRTF)
            if (!streamToAdd.isStereo() && !isEcho) {
                // get the existing listener-source HRTF object, or create a new one
                auto& hrtf = listenerNodeData.hrtfForStream(sourceNodeID, streamToAdd.getStreamIdentifier());

                static int16_t silentMonoBlock[AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL] = {};
                hrtf.renderSilent(silentMonoBlock, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                                  AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

                ++stats.hrtfSilentRenders;
            }
            return;
        }
    }

    // grab the stream from the ring buffer
    AudioRingBuffer::ConstIterator streamPopOutput = streamToAdd.getLastPopOutput();

    // stereo sources are not passed through HRTF
    if (streamToAdd.isStereo()) {
        for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; ++i) {
            _mixSamples[i] += float(streamPopOutput[i] * gain / AudioConstants::MAX_SAMPLE_VALUE);
        }
        ++stats.manualStereoMixes;
        return;
    }

    // echo sources are not passed through HRTF
    if (isEcho) {
        for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; i += 2) {
            auto monoSample = float(streamPopOutput[i / 2] * gain / AudioConstants::MAX_SAMPLE_VALUE);
            _mixSamples[i] += monoSample;
            _mixSamples[i + 1] += monoSample;
        }
        ++stats.manualEchoMixes;
        return;
    }

    // get the existing listener-source HRTF object, or create a new one
    auto& hrtf = listenerNodeData.hrtfForStream(sourceNodeID, streamToAdd.getStreamIdentifier());

    streamPopOutput.readSamples(_bufferSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

    if (streamToAdd.getLastPopOutputLoudness() == 0.0f) {
        // call renderSilent to reduce artifacts
        hrtf.renderSilent(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                          AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
        ++stats.hrtfSilentRenders;
        return;
    }

    if (throttle) {
        // call renderSilent with actual frame data and a gain of 0.0f to reduce artifacts
        hrtf.renderSilent(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, 0.0f,
                          AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
        ++stats.hrtfThrottleRenders;
        return;
    }

    hrtf.render(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
    ++stats.hrtfRenders;
}
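// calculateRepeatedFrameFadeFactor is defined elsewhere in the codebase; the sketch
// below is only a plausible stand-in (a hypothetical linear fade over a fixed number
// of repeats) to make the repeat-with-fade branch above concrete. The real function's
// curve and constants may differ.
float exampleRepeatedFrameFadeFactor(int indexOfRepeat) {
    const int MAX_REPEATS_BEFORE_SILENCE = 8;   // assumption, not the codebase's constant
    if (indexOfRepeat >= MAX_REPEATS_BEFORE_SILENCE) {
        return 0.0f;   // matches the forceSilentBlock path above: the fade has run out
    }
    // fade linearly from 1.0 on the first repeat down toward 0.0
    return 1.0f - (float)indexOfRepeat / (float)MAX_REPEATS_BEFORE_SILENCE;
}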
bool AudioMixerSlave::prepareMix(const SharedNodePointer& listener) {
    AudioMixerClientData* listenerData = static_cast<AudioMixerClientData*>(listener->getLinkedData());
    AvatarAudioStream* listenerAudioStream = listenerData->getAvatarAudioStream();

    // if we received an invalid position from this listener, then refuse to make them a mix
    // because we don't know how to do it properly
    if (!listenerAudioStream->hasValidPosition()) {
        return false;
    }

    // zero out the mix for this listener
    memset(_mixSamples, 0, sizeof(_mixSamples));

    bool isThrottling = _throttlingRatio > 0.0f;
    std::vector<std::pair<float, SharedNodePointer>> throttledNodes;

    typedef void (AudioMixerSlave::*MixFunctor)(
            AudioMixerClientData&, const QUuid&, const AvatarAudioStream&, const PositionalAudioStream&);
    auto forAllStreams = [&](const SharedNodePointer& node, AudioMixerClientData* nodeData, MixFunctor mixFunctor) {
        auto nodeID = node->getUUID();
        for (auto& streamPair : nodeData->getAudioStreams()) {
            auto nodeStream = streamPair.second;
            (this->*mixFunctor)(*listenerData, nodeID, *listenerAudioStream, *nodeStream);
        }
    };

#ifdef HIFI_AUDIO_MIXER_DEBUG
    auto mixStart = p_high_resolution_clock::now();
#endif

    std::for_each(_begin, _end, [&](const SharedNodePointer& node) {
        AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
        if (!nodeData) {
            return;
        }

        if (*node == *listener) {
            // only mix the echo, if requested
            for (auto& streamPair : nodeData->getAudioStreams()) {
                auto nodeStream = streamPair.second;
                if (nodeStream->shouldLoopbackForNode()) {
                    mixStream(*listenerData, node->getUUID(), *listenerAudioStream, *nodeStream);
                }
            }
        } else if (!listenerData->shouldIgnore(listener, node, _frame)) {
            if (!isThrottling) {
                forAllStreams(node, nodeData, &AudioMixerSlave::mixStream);
            } else {
                auto nodeID = node->getUUID();

                // compute the node's max relative volume
                float nodeVolume = 0.0f;
                for (auto& streamPair : nodeData->getAudioStreams()) {
                    auto nodeStream = streamPair.second;

                    // approximate the gain
                    glm::vec3 relativePosition = nodeStream->getPosition() - listenerAudioStream->getPosition();
                    float gain = approximateGain(*listenerAudioStream, *nodeStream, relativePosition);

                    // modify by hrtf gain adjustment
                    auto& hrtf = listenerData->hrtfForStream(nodeID, nodeStream->getStreamIdentifier());
                    gain *= hrtf.getGainAdjustment();

                    auto streamVolume = nodeStream->getLastPopOutputTrailingLoudness() * gain;
                    nodeVolume = std::max(streamVolume, nodeVolume);
                }

                // max-heapify the nodes by relative volume
                throttledNodes.push_back({ nodeVolume, node });
                std::push_heap(throttledNodes.begin(), throttledNodes.end());
            }
        }
    });

    if (isThrottling) {
        // pop the loudest nodes off the heap and mix their streams
        int numToRetain = (int)(std::distance(_begin, _end) * (1 - _throttlingRatio));
        for (int i = 0; i < numToRetain; i++) {
            if (throttledNodes.empty()) {
                break;
            }

            std::pop_heap(throttledNodes.begin(), throttledNodes.end());

            auto& node = throttledNodes.back().second;
            AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
            forAllStreams(node, nodeData, &AudioMixerSlave::mixStream);

            throttledNodes.pop_back();
        }

        // throttle the remaining nodes' streams
        for (const std::pair<float, SharedNodePointer>& nodePair : throttledNodes) {
            auto& node = nodePair.second;
            AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
            forAllStreams(node, nodeData, &AudioMixerSlave::throttleStream);
        }
    }

#ifdef HIFI_AUDIO_MIXER_DEBUG
    auto mixEnd = p_high_resolution_clock::now();
    auto mixTime = std::chrono::duration_cast<std::chrono::nanoseconds>(mixEnd - mixStart);
    stats.mixTime += mixTime.count();
#endif

    // check for silent audio before limiting
    // limiting uses a dither and can only guarantee abs(sample) <= 1
    bool hasAudio = false;
    for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; ++i) {
        if (_mixSamples[i] != 0.0f) {
            hasAudio = true;
            break;
        }
    }

    // use the per-listener AudioLimiter to render the mixed data
    listenerData->audioLimiter.render(_mixSamples, _bufferSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

    return hasAudio;
}
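// A self-contained sketch of the throttling pattern above: push every (volume, id)
// pair onto a max-heap, pop the loudest numToRetain entries for full mixing, and
// whatever is left in the vector gets throttled. Plain std types; no mixer types assumed.
#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

int main() {
    std::vector<std::pair<float, int>> nodes = { { 0.2f, 1 }, { 0.9f, 2 }, { 0.5f, 3 }, { 0.1f, 4 } };
    std::make_heap(nodes.begin(), nodes.end());   // max-heap on volume (the pair's first element)

    const int numToRetain = 2;   // e.g. 4 nodes at a throttling ratio of 0.5
    for (int i = 0; i < numToRetain && !nodes.empty(); i++) {
        std::pop_heap(nodes.begin(), nodes.end());   // move the loudest node to the back
        printf("mix node %d (volume %.1f)\n", nodes.back().second, nodes.back().first);
        nodes.pop_back();
    }
    for (const auto& nodePair : nodes) {
        printf("throttle node %d (volume %.1f)\n", nodePair.second, nodePair.first);
    }
    return 0;
}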
float computeGain(float masterListenerGain, const AvatarAudioStream& listeningNodeStream,
                  const PositionalAudioStream& streamToAdd, const glm::vec3& relativePosition,
                  float distance, bool isEcho) {
    float gain = 1.0f;

    // injector: apply attenuation
    if (streamToAdd.getType() == PositionalAudioStream::Injector) {
        gain *= reinterpret_cast<const InjectedAudioStream*>(&streamToAdd)->getAttenuationRatio();

    // avatar: apply fixed off-axis attenuation to make them quieter as they turn away
    } else if (!isEcho && (streamToAdd.getType() == PositionalAudioStream::Microphone)) {
        glm::vec3 rotatedListenerPosition = glm::inverse(streamToAdd.getOrientation()) * relativePosition;

        // source directivity is based on angle of emission, in local coordinates
        glm::vec3 direction = glm::normalize(rotatedListenerPosition);
        float angleOfDelivery = fastAcosf(glm::clamp(-direction.z, -1.0f, 1.0f));   // UNIT_NEG_Z is "forward"

        const float MAX_OFF_AXIS_ATTENUATION = 0.2f;
        const float OFF_AXIS_ATTENUATION_STEP = (1 - MAX_OFF_AXIS_ATTENUATION) / 2.0f;
        float offAxisCoefficient = MAX_OFF_AXIS_ATTENUATION +
            (angleOfDelivery * (OFF_AXIS_ATTENUATION_STEP / PI_OVER_TWO));

        gain *= offAxisCoefficient;

        // apply master gain, only to avatars
        gain *= masterListenerGain;
    }

    auto& audioZones = AudioMixer::getAudioZones();
    auto& zoneSettings = AudioMixer::getZoneSettings();

    // find distance attenuation coefficient
    float attenuationPerDoublingInDistance = AudioMixer::getAttenuationPerDoublingInDistance();
    for (const auto& settings : zoneSettings) {
        if (audioZones[settings.source].area.contains(streamToAdd.getPosition()) &&
            audioZones[settings.listener].area.contains(listeningNodeStream.getPosition())) {
            attenuationPerDoublingInDistance = settings.coefficient;
            break;
        }
    }

    if (attenuationPerDoublingInDistance < 0.0f) {
        // translate a negative zone setting to a distance limit
        const float MIN_DISTANCE_LIMIT = ATTN_DISTANCE_REF + 1.0f;   // silent after 1m
        float distanceLimit = std::max(-attenuationPerDoublingInDistance, MIN_DISTANCE_LIMIT);

        // calculate the LINEAR attenuation using the distance to this node
        // reference attenuation of 0dB at distance = ATTN_DISTANCE_REF
        float d = distance - ATTN_DISTANCE_REF;
        gain *= std::max(1.0f - d / (distanceLimit - ATTN_DISTANCE_REF), 0.0f);
        gain = std::min(gain, ATTN_GAIN_MAX);
    } else {
        // translate a positive zone setting to gain per log2(distance)
        const float MIN_ATTENUATION_COEFFICIENT = 0.001f;   // -60dB per log2(distance)
        float g = glm::clamp(1.0f - attenuationPerDoublingInDistance, MIN_ATTENUATION_COEFFICIENT, 1.0f);

        // calculate the LOGARITHMIC attenuation using the distance to this node
        // reference attenuation of 0dB at distance = ATTN_DISTANCE_REF
        float d = (1.0f / ATTN_DISTANCE_REF) * std::max(distance, HRTF_NEARFIELD_MIN);
        gain *= fastExp2f(fastLog2f(g) * fastLog2f(d));
        gain = std::min(gain, ATTN_GAIN_MAX);
    }

    return gain;
}
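// A numeric illustration of the negative-setting branch above. ATTN_DISTANCE_REF is
// defined elsewhere in the codebase; the 2.0f below is an assumed value for this demo
// only. A zone coefficient of -10 reads as "fade linearly to silence at 10 meters":
#include <algorithm>
#include <cstdio>

int main() {
    const float ATTN_DISTANCE_REF_DEMO = 2.0f;   // assumption for illustration
    const float distanceLimit = 10.0f;           // from coefficient = -10
    const float distances[] = { 2.0f, 6.0f, 10.0f, 12.0f };
    for (float distance : distances) {
        float d = distance - ATTN_DISTANCE_REF_DEMO;
        float gain = std::max(1.0f - d / (distanceLimit - ATTN_DISTANCE_REF_DEMO), 0.0f);
        printf("distance = %.0f m -> gain = %.2f\n", distance, gain);   // 1.00, 0.50, 0.00, 0.00
    }
    return 0;
}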
void AudioMixerSlave::addStream(AudioMixerClientData::MixableStream& mixableStream,
                                AvatarAudioStream& listeningNodeStream,
                                float masterListenerGain, bool isSoloing) {
    ++stats.totalMixes;

    auto streamToAdd = mixableStream.positionalStream;

    // check if this is a server echo of a source back to itself
    bool isEcho = (streamToAdd == &listeningNodeStream);

    glm::vec3 relativePosition = streamToAdd->getPosition() - listeningNodeStream.getPosition();
    float distance = glm::max(glm::length(relativePosition), EPSILON);
    float azimuth = isEcho ? 0.0f : computeAzimuth(listeningNodeStream, listeningNodeStream, relativePosition);

    float gain = masterListenerGain;
    if (!isSoloing) {
        gain = computeGain(masterListenerGain, listeningNodeStream, *streamToAdd, relativePosition, distance, isEcho);
    }

    const int HRTF_DATASET_INDEX = 1;

    if (!streamToAdd->lastPopSucceeded()) {
        bool forceSilentBlock = true;

        if (!streamToAdd->getLastPopOutput().isNull()) {
            bool isInjector = dynamic_cast<const InjectedAudioStream*>(streamToAdd);

            // in an injector, just go silent - the injector has likely ended
            // in other inputs (microphone, &c.), repeat with fade to avoid the harsh jump to silence
            if (!isInjector) {
                // calculate its fade factor, which depends on how many times it's already been repeated
                float fadeFactor = calculateRepeatedFrameFadeFactor(streamToAdd->getConsecutiveNotMixedCount() - 1);
                if (fadeFactor > 0.0f) {
                    // apply the fadeFactor to the gain
                    gain *= fadeFactor;
                    forceSilentBlock = false;
                }
            }
        }

        if (forceSilentBlock) {
            // render a forced silent block to reduce artifacts
            // (this is not done for stereo streams since they do not go through the HRTF)
            if (!streamToAdd->isStereo() && !isEcho) {
                static int16_t silentMonoBlock[AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL] = {};
                mixableStream.hrtf->render(silentMonoBlock, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                                           AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

                ++stats.hrtfRenders;
            }
            return;
        }
    }

    // grab the stream from the ring buffer
    AudioRingBuffer::ConstIterator streamPopOutput = streamToAdd->getLastPopOutput();

    // stereo sources are not passed through HRTF
    if (streamToAdd->isStereo()) {
        // apply the avatar gain adjustment
        gain *= mixableStream.hrtf->getGainAdjustment();

        const float scale = 1 / 32768.0f;   // int16_t to float

        for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL; i++) {
            _mixSamples[2 * i + 0] += (float)streamPopOutput[2 * i + 0] * gain * scale;
            _mixSamples[2 * i + 1] += (float)streamPopOutput[2 * i + 1] * gain * scale;
        }

        ++stats.manualStereoMixes;
    } else if (isEcho) {
        // echo sources are not passed through HRTF
        const float scale = 1 / 32768.0f;   // int16_t to float

        for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL; i++) {
            float sample = (float)streamPopOutput[i] * gain * scale;
            _mixSamples[2 * i + 0] += sample;
            _mixSamples[2 * i + 1] += sample;
        }

        ++stats.manualEchoMixes;
    } else {
        streamPopOutput.readSamples(_bufferSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

        mixableStream.hrtf->render(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                                   AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

        ++stats.hrtfRenders;
    }
}
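// A tiny standalone illustration of the interleaved accumulation above: mono int16_t
// samples are scaled by 1/32768 into float and written to both channels of a stereo
// mix buffer, the same pattern the echo branch uses (buffer sizes shrunk for the demo).
#include <cstdint>
#include <cstdio>

int main() {
    const int FRAMES = 4;
    int16_t mono[FRAMES] = { 16384, -16384, 32767, 0 };
    float mix[2 * FRAMES] = {};   // interleaved stereo accumulator

    const float gain = 0.5f;
    const float scale = 1 / 32768.0f;   // int16_t to float
    for (int i = 0; i < FRAMES; i++) {
        float sample = (float)mono[i] * gain * scale;
        mix[2 * i + 0] += sample;   // left channel
        mix[2 * i + 1] += sample;   // right channel
    }

    for (int i = 0; i < 2 * FRAMES; i++) {
        printf("%.4f ", mix[i]);    // 0.2500 0.2500 -0.2500 -0.2500 ...
    }
    printf("\n");
    return 0;
}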
void AudioMixer::addStreamToMixForListeningNodeWithStream(AudioMixerClientData& listenerNodeData,
                                                          const PositionalAudioStream& streamToAdd,
                                                          const QUuid& sourceNodeID,
                                                          const AvatarAudioStream& listeningNodeStream) {
    // to reduce artifacts we calculate the gain and azimuth for every source for this listener,
    // even if we are not going to end up mixing in this source
    ++_totalMixes;

    // this ensures that the tail of any previously mixed audio or the first block of new audio sounds correct

    // check if this is a server echo of a source back to itself
    bool isEcho = (&streamToAdd == &listeningNodeStream);

    glm::vec3 relativePosition = streamToAdd.getPosition() - listeningNodeStream.getPosition();

    // figure out the distance between source and listener
    float distance = glm::max(glm::length(relativePosition), EPSILON);

    // figure out the gain for this source at the listener
    float gain = gainForSource(streamToAdd, listeningNodeStream, relativePosition, isEcho);

    // figure out the azimuth to this source at the listener
    float azimuth = isEcho ? 0.0f : azimuthForSource(streamToAdd, listeningNodeStream, relativePosition);

    float repeatedFrameFadeFactor = 1.0f;

    static const int HRTF_DATASET_INDEX = 1;

    if (!streamToAdd.lastPopSucceeded()) {
        bool forceSilentBlock = true;

        if (_streamSettings._repetitionWithFade && !streamToAdd.getLastPopOutput().isNull()) {

            // repetition with fade is enabled, and we do have a valid previous frame to repeat,
            // so we mix the previously-mixed block
            // this is preferable to not mixing it at all, to avoid the harsh jump to silence
            // we'll repeat the last block until there is a new block to mix,
            // and we'll gradually fade that repeated block into silence

            // calculate its fade factor, which depends on how many times it's already been repeated
            repeatedFrameFadeFactor = calculateRepeatedFrameFadeFactor(streamToAdd.getConsecutiveNotMixedCount() - 1);
            if (repeatedFrameFadeFactor > 0.0f) {
                // apply the repeatedFrameFadeFactor to the gain
                gain *= repeatedFrameFadeFactor;

                forceSilentBlock = false;
            }
        }

        if (forceSilentBlock) {
            // we're deciding not to repeat, either because we've already done it enough times
            // or because repetition with fade is disabled
            // in this case we will call renderSilent with a forced silent block
            // this ensures the correct tail from the previously mixed block and the correct
            // spatialization of the first block of any upcoming audio

            if (!streamToAdd.isStereo() && !isEcho) {
                // get the existing listener-source HRTF object, or create a new one
                auto& hrtf = listenerNodeData.hrtfForStream(sourceNodeID, streamToAdd.getStreamIdentifier());

                // this is not done for stereo streams since they do not go through the HRTF
                static int16_t silentMonoBlock[AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL] = {};
                hrtf.renderSilent(silentMonoBlock, _mixedSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                                  AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

                ++_hrtfSilentRenders;
            }

            return;
        }
    }

    // grab the stream from the ring buffer
    AudioRingBuffer::ConstIterator streamPopOutput = streamToAdd.getLastPopOutput();

    if (streamToAdd.isStereo() || isEcho) {
        // this is a stereo source or server echo, so we do not pass it through the HRTF
        // simply apply our calculated gain to each sample
        if (streamToAdd.isStereo()) {
            for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; ++i) {
                _mixedSamples[i] += float(streamPopOutput[i] * gain / AudioConstants::MAX_SAMPLE_VALUE);
            }
            ++_manualStereoMixes;
        } else {
            for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; i += 2) {
                auto monoSample = float(streamPopOutput[i / 2] * gain / AudioConstants::MAX_SAMPLE_VALUE);
                _mixedSamples[i] += monoSample;
                _mixedSamples[i + 1] += monoSample;
            }
            ++_manualEchoMixes;
        }

        return;
    }

    // get the existing listener-source HRTF object, or create a new one
    auto& hrtf = listenerNodeData.hrtfForStream(sourceNodeID, streamToAdd.getStreamIdentifier());

    static int16_t streamBlock[AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL];
    streamPopOutput.readSamples(streamBlock, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

    // if the frame we're about to mix is silent, simply call renderSilent and move on
    if (streamToAdd.getLastPopOutputLoudness() == 0.0f) {
        // silent frame from source
        // we still need to call renderSilent via the HRTF for a mono source
        hrtf.renderSilent(streamBlock, _mixedSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                          AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

        ++_hrtfSilentRenders;

        return;
    }

    if (_performanceThrottlingRatio > 0.0f &&
        streamToAdd.getLastPopOutputTrailingLoudness() / glm::length(relativePosition) <= _minAudibilityThreshold) {
        // the mixer is struggling, so we're going to drop off some streams
        // we call renderSilent via the HRTF with the actual frame data and a gain of 0.0
        hrtf.renderSilent(streamBlock, _mixedSamples, HRTF_DATASET_INDEX, azimuth, distance, 0.0f,
                          AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

        ++_hrtfStruggleRenders;

        return;
    }

    ++_hrtfRenders;

    // mono stream, call the HRTF with our block and the calculated azimuth and gain
    hrtf.render(streamBlock, _mixedSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
}
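// A standalone look at the audibility cutoff above: trailing loudness divided by
// distance is compared against _minAudibilityThreshold. The threshold and loudness
// values here are made up purely to show the shape of the test.
#include <cstdio>

int main() {
    const float minAudibilityThreshold = 0.05f;   // hypothetical value
    const float trailingLoudness = 1.0f;          // hypothetical stream loudness

    const float distances[] = { 5.0f, 20.0f, 40.0f };
    for (float distance : distances) {
        bool dropped = (trailingLoudness / distance) <= minAudibilityThreshold;
        printf("distance = %.0f m -> %s\n", distance, dropped ? "renderSilent (dropped)" : "render (mixed)");
    }
    return 0;
}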