Example 1
void sendEnvironmentPacket(const SharedNodePointer& node, AudioMixerClientData& data) {
    bool hasReverb = false;
    float reverbTime, wetLevel;

    auto& reverbSettings = AudioMixer::getReverbSettings();
    auto& audioZones = AudioMixer::getAudioZones();

    AvatarAudioStream* stream = data.getAvatarAudioStream();
    glm::vec3 streamPosition = stream->getPosition();

    // find reverb properties
    for (int i = 0; i < reverbSettings.size(); ++i) {
        AABox box = audioZones[reverbSettings[i].zone];
        if (box.contains(streamPosition)) {
            hasReverb = true;
            reverbTime = reverbSettings[i].reverbTime;
            wetLevel = reverbSettings[i].wetLevel;
            break;
        }
    }

    // check if data changed
    bool dataChanged = (stream->hasReverb() != hasReverb) ||
        (stream->hasReverb() && (stream->getRevebTime() != reverbTime || stream->getWetLevel() != wetLevel));
    if (dataChanged) {
        // update stream
        if (hasReverb) {
            stream->setReverb(reverbTime, wetLevel);
        } else {
            stream->clearReverb();
        }
    }

    // send packet at change or every so often
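    // (with CHANCE_OF_SEND at 0.01f, roughly 1 in 100 calls re-send the current state even when nothing changed)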
    float CHANCE_OF_SEND = 0.01f;
    bool sendData = dataChanged || (randFloat() < CHANCE_OF_SEND);

    if (sendData) {
        // size the packet
        unsigned char bitset = 0;
        int packetSize = sizeof(bitset);
        if (hasReverb) {
            packetSize += sizeof(reverbTime) + sizeof(wetLevel);
        }

        // write the packet
        auto envPacket = NLPacket::create(PacketType::AudioEnvironment, packetSize);
        if (hasReverb) {
            setAtBit(bitset, HAS_REVERB_BIT);
        }
        envPacket->writePrimitive(bitset);
        if (hasReverb) {
            envPacket->writePrimitive(reverbTime);
            envPacket->writePrimitive(wetLevel);
        }

        // send the packet
        DependencyManager::get<NodeList>()->sendPacket(std::move(envPacket), *node);
    }
}
Example 2
bool shouldBeSkipped(MixableStream& stream, const Node& listener,
                     const AvatarAudioStream& listenerAudioStream,
                     const AudioMixerClientData& listenerData) {

    if (stream.nodeStreamID.nodeLocalID == listener.getLocalID()) {
        return !stream.positionalStream->shouldLoopbackForNode();
    }

    // grab the unprocessed ignores and unignores from and for this listener
    const auto& nodesIgnoredByListener = listenerData.getNewIgnoredNodeIDs();
    const auto& nodesUnignoredByListener = listenerData.getNewUnignoredNodeIDs();
    const auto& nodesIgnoringListener = listenerData.getNewIgnoringNodeIDs();
    const auto& nodesUnignoringListener = listenerData.getNewUnignoringNodeIDs();

    // update the listener's ignore flag for this stream:
    // a previously ignored stream stays ignored unless the listener just unignored it,
    // and a previously unignored stream becomes ignored if the listener just ignored it
    if (stream.ignoredByListener) {
        stream.ignoredByListener = !contains(nodesUnignoredByListener, stream.nodeStreamID.nodeID);
    } else {
        stream.ignoredByListener = contains(nodesIgnoredByListener, stream.nodeStreamID.nodeID);
    }

    if (stream.ignoringListener) {
        stream.ignoringListener = !contains(nodesUnignoringListener, stream.nodeStreamID.nodeID);
    } else {
        stream.ignoringListener = contains(nodesIgnoringListener, stream.nodeStreamID.nodeID);
    }

    bool listenerIsAdmin = listenerData.getRequestsDomainListData() && listener.getCanKick();
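    // a listener with kick rights who has requested domain list data still hears streams that are ignoring them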
    if (stream.ignoredByListener || (stream.ignoringListener && !listenerIsAdmin)) {
        return true;
    }

    if (!listenerData.getSoloedNodes().empty()) {
        return !contains(listenerData.getSoloedNodes(), stream.nodeStreamID.nodeID);
    }

    bool shouldCheckIgnoreBox = (listenerAudioStream.isIgnoreBoxEnabled() ||
                                 stream.positionalStream->isIgnoreBoxEnabled());
    if (shouldCheckIgnoreBox &&
        listenerAudioStream.getIgnoreBox().touches(stream.positionalStream->getIgnoreBox())) {
        return true;
    }

    return false;
}
Example 3
float computeGain(const AudioMixerClientData& listenerNodeData, const AvatarAudioStream& listeningNodeStream,
        const PositionalAudioStream& streamToAdd, const glm::vec3& relativePosition, bool isEcho) {
    float gain = 1.0f;

    // injector: apply attenuation
    if (streamToAdd.getType() == PositionalAudioStream::Injector) {
        gain *= reinterpret_cast<const InjectedAudioStream*>(&streamToAdd)->getAttenuationRatio();

    // avatar: apply fixed off-axis attenuation to make them quieter as they turn away
    } else if (!isEcho && (streamToAdd.getType() == PositionalAudioStream::Microphone)) {
        glm::vec3 rotatedListenerPosition = glm::inverse(streamToAdd.getOrientation()) * relativePosition;

        // source directivity is based on angle of emission, in local coordinates
        glm::vec3 direction = glm::normalize(rotatedListenerPosition);
        float angleOfDelivery = fastAcosf(glm::clamp(-direction.z, -1.0f, 1.0f));   // UNIT_NEG_Z is "forward"

        const float MAX_OFF_AXIS_ATTENUATION = 0.2f;
        const float OFF_AXIS_ATTENUATION_STEP = (1 - MAX_OFF_AXIS_ATTENUATION) / 2.0f;
        float offAxisCoefficient = MAX_OFF_AXIS_ATTENUATION + (angleOfDelivery * (OFF_AXIS_ATTENUATION_STEP / PI_OVER_TWO));
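        // (the coefficient varies linearly from MAX_OFF_AXIS_ATTENUATION at an angle of 0 to 1.0 at PI radians)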

        gain *= offAxisCoefficient;

        // apply master gain, only to avatars
        gain *= listenerNodeData.getMasterAvatarGain();
    }

    auto& audioZones = AudioMixer::getAudioZones();
    auto& zoneSettings = AudioMixer::getZoneSettings();

    // find distance attenuation coefficient
    float attenuationPerDoublingInDistance = AudioMixer::getAttenuationPerDoublingInDistance();
    for (int i = 0; i < zoneSettings.length(); ++i) {
        if (audioZones[zoneSettings[i].source].contains(streamToAdd.getPosition()) &&
            audioZones[zoneSettings[i].listener].contains(listeningNodeStream.getPosition())) {
            attenuationPerDoublingInDistance = zoneSettings[i].coefficient;
            break;
        }
    }

    // distance attenuation
    const float ATTENUATION_START_DISTANCE = 1.0f;
    float distance = glm::length(relativePosition);
    assert(ATTENUATION_START_DISTANCE > EPSILON);
    if (distance >= ATTENUATION_START_DISTANCE) {

        // translate the zone setting to gain per log2(distance)
        float g = 1.0f - attenuationPerDoublingInDistance;
        g = glm::clamp(g, EPSILON, 1.0f);

        // calculate the distance coefficient using the distance to this node
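        // fastExp2f(fastLog2f(g) * fastLog2f(d)) == g^log2(d), so the gain is multiplied by g
        // for every doubling of distance beyond ATTENUATION_START_DISTANCE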
        float distanceCoefficient = fastExp2f(fastLog2f(g) * fastLog2f(distance/ATTENUATION_START_DISTANCE));

        // multiply the current attenuation coefficient by the distance coefficient
        gain *= distanceCoefficient;
    }

    return gain;
}
Example 4
float AudioMixer::gainForSource(const PositionalAudioStream& streamToAdd,
                                const AvatarAudioStream& listeningNodeStream, const glm::vec3& relativePosition, bool isEcho) {
    float gain = 1.0f;

    float distanceBetween = glm::length(relativePosition);

    if (distanceBetween < EPSILON) {
        distanceBetween = EPSILON;
    }

    if (streamToAdd.getType() == PositionalAudioStream::Injector) {
        gain *= reinterpret_cast<const InjectedAudioStream*>(&streamToAdd)->getAttenuationRatio();
    }

    if (!isEcho && (streamToAdd.getType() == PositionalAudioStream::Microphone)) {
        //  source is another avatar, apply fixed off-axis attenuation to make them quieter as they turn away from listener
        glm::vec3 rotatedListenerPosition = glm::inverse(streamToAdd.getOrientation()) * relativePosition;

        float angleOfDelivery = glm::angle(glm::vec3(0.0f, 0.0f, -1.0f),
                                           glm::normalize(rotatedListenerPosition));

        const float MAX_OFF_AXIS_ATTENUATION = 0.2f;
        const float OFF_AXIS_ATTENUATION_FORMULA_STEP = (1 - MAX_OFF_AXIS_ATTENUATION) / 2.0f;

        float offAxisCoefficient = MAX_OFF_AXIS_ATTENUATION +
        (OFF_AXIS_ATTENUATION_FORMULA_STEP * (angleOfDelivery / PI_OVER_TWO));

        // multiply the current attenuation coefficient by the calculated off axis coefficient
        gain *= offAxisCoefficient;
    }

    float attenuationPerDoublingInDistance = _attenuationPerDoublingInDistance;
    for (int i = 0; i < _zonesSettings.length(); ++i) {
        if (_audioZones[_zonesSettings[i].source].contains(streamToAdd.getPosition()) &&
            _audioZones[_zonesSettings[i].listener].contains(listeningNodeStream.getPosition())) {
            attenuationPerDoublingInDistance = _zonesSettings[i].coefficient;
            break;
        }
    }

    if (distanceBetween >= ATTENUATION_BEGINS_AT_DISTANCE) {

        // translate the zone setting to gain per log2(distance)
        float g = 1.0f - attenuationPerDoublingInDistance;
        g = (g < EPSILON) ? EPSILON : g;
        g = (g > 1.0f) ? 1.0f : g;

        // calculate the distance coefficient using the distance to this node
        float distanceCoefficient = exp2f(log2f(g) * log2f(distanceBetween/ATTENUATION_BEGINS_AT_DISTANCE));

        // multiply the current attenuation coefficient by the distance coefficient
        gain *= distanceCoefficient;
    }

    return gain;
}
Example 5
AudioMixerClientData::IgnoreZone& AudioMixerClientData::IgnoreZoneMemo::get(unsigned int frame) {
    // check for a memoized zone
    if (frame != _frame.load(std::memory_order_acquire)) {
        AvatarAudioStream* stream = _data.getAvatarAudioStream();

        // get the initial dimensions from the stream
        glm::vec3 corner = stream ? stream->getAvatarBoundingBoxCorner() : glm::vec3(0);
        glm::vec3 scale = stream ? stream->getAvatarBoundingBoxScale() : glm::vec3(0);

        // enforce a minimum scale
        static const glm::vec3 MIN_IGNORE_BOX_SCALE = glm::vec3(0.3f, 1.3f, 0.3f);
        if (glm::any(glm::lessThan(scale, MIN_IGNORE_BOX_SCALE))) {
            scale = MIN_IGNORE_BOX_SCALE;
        }

        // quadruple the scale (an arbitrary factor chosen for comfort)
        const float IGNORE_BOX_SCALE_FACTOR = 4.0f;
        scale *= IGNORE_BOX_SCALE_FACTOR;

        // create the box (we use a box for the zone for convenience)
        AABox box(corner, scale);

        // update the memoized zone
        // This may be called by multiple threads concurrently,
        // so take a lock and only update the memo if this call is first.
        // This prevents concurrent updates from invalidating the returned reference
        // (contingent on the preconditions listed in the header).
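        // (another thread may have memoized this frame between the unlocked check above
        //  and acquiring the lock, hence the second check below)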
        std::lock_guard<std::mutex> lock(_mutex);
        if (frame != _frame.load(std::memory_order_acquire)) {
            _zone = box;
            unsigned int oldFrame = _frame.exchange(frame, std::memory_order_release);
            Q_UNUSED(oldFrame);
        }
    }

    return _zone;
}
Example 6
float approximateGain(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd) {
    float gain = 1.0f;

    // injector: apply attenuation
    if (streamToAdd.getType() == PositionalAudioStream::Injector) {
        gain *= reinterpret_cast<const InjectedAudioStream*>(&streamToAdd)->getAttenuationRatio();
    }

    // avatar: skip attenuation - it is too costly to approximate

    // distance attenuation: approximate, ignore zone-specific attenuations
    glm::vec3 relativePosition = streamToAdd.getPosition() - listeningNodeStream.getPosition();
    float distance = glm::length(relativePosition);
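    // 1.0f / distance stands in for the full distance-attenuation curve; over-estimating the gain
    // is harmless here since the caller only uses this value to rank streams for throttling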
    return gain / distance;

    // avatar: skip master gain - it is constant for all streams
}
Example 7
void AudioMixerSlave::updateHRTFParameters(AudioMixerClientData::MixableStream& mixableStream,
                                      AvatarAudioStream& listeningNodeStream,
                                      float masterListenerGain) {
    auto streamToAdd = mixableStream.positionalStream;

    // check if this is a server echo of a source back to itself
    bool isEcho = (streamToAdd == &listeningNodeStream);

    glm::vec3 relativePosition = streamToAdd->getPosition() - listeningNodeStream.getPosition();

    float distance = glm::max(glm::length(relativePosition), EPSILON);
    float gain = computeGain(masterListenerGain, listeningNodeStream, *streamToAdd, relativePosition, distance, isEcho);
    float azimuth = isEcho ? 0.0f : computeAzimuth(listeningNodeStream, listeningNodeStream, relativePosition);

    mixableStream.hrtf->setParameterHistory(azimuth, distance, gain);

    ++stats.hrtfUpdates;
}
Example 8
float AudioMixer::azimuthForSource(const PositionalAudioStream& streamToAdd, const AvatarAudioStream& listeningNodeStream,
                                   const glm::vec3& relativePosition) {
    glm::quat inverseOrientation = glm::inverse(listeningNodeStream.getOrientation());

    //  Compute sample delay for the two ears to create phase panning
    glm::vec3 rotatedSourcePosition = inverseOrientation * relativePosition;

    // project the rotated source position vector onto the XZ plane
    rotatedSourcePosition.y = 0.0f;

    static const float SOURCE_DISTANCE_THRESHOLD = 1e-30f;

    if (glm::length2(rotatedSourcePosition) > SOURCE_DISTANCE_THRESHOLD) {
        // produce an oriented angle about the y-axis
        return glm::orientedAngle(glm::vec3(0.0f, 0.0f, -1.0f), glm::normalize(rotatedSourcePosition), glm::vec3(0.0f, -1.0f, 0.0f));
    } else {
        // there is no distance between listener and source - return no azimuth
        return 0;
    }
}
Example 9
float computeAzimuth(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd,
        const glm::vec3& relativePosition) {
    glm::quat inverseOrientation = glm::inverse(listeningNodeStream.getOrientation());

    glm::vec3 rotatedSourcePosition = inverseOrientation * relativePosition;

    // project the rotated source position vector onto the XZ plane
    rotatedSourcePosition.y = 0.0f;

    const float SOURCE_DISTANCE_THRESHOLD = 1e-30f;

    float rotatedSourcePositionLength2 = glm::length2(rotatedSourcePosition);
    if (rotatedSourcePositionLength2 > SOURCE_DISTANCE_THRESHOLD) {
        
        // produce an oriented angle about the y-axis
        glm::vec3 direction = rotatedSourcePosition * (1.0f / fastSqrtf(rotatedSourcePositionLength2));
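        // acos of -direction.z gives the unsigned angle from "forward" in [0, PI];
        // the sign of direction.x recovers the side, equivalent to atan2(direction.x, -direction.z)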
        float angle = fastAcosf(glm::clamp(-direction.z, -1.0f, 1.0f)); // UNIT_NEG_Z is "forward"
        return (direction.x < 0.0f) ? -angle : angle;

    } else {   
        // no azimuth if they are in the same spot
        return 0.0f; 
    }
}
Example 10
void AudioMixerSlave::addStream(AudioMixerClientData& listenerNodeData, const QUuid& sourceNodeID,
        const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd,
        bool throttle) {
    ++stats.totalMixes;

    // to reduce artifacts we call the HRTF functor for every source, even if throttled or silent
    // this ensures the correct tail from last mixed block and the correct spatialization of next first block

    // check if this is a server echo of a source back to itself
    bool isEcho = (&streamToAdd == &listeningNodeStream);

    glm::vec3 relativePosition = streamToAdd.getPosition() - listeningNodeStream.getPosition();

    float distance = glm::max(glm::length(relativePosition), EPSILON);
    float gain = computeGain(listenerNodeData, listeningNodeStream, streamToAdd, relativePosition, isEcho);
    float azimuth = isEcho ? 0.0f : computeAzimuth(listeningNodeStream, listeningNodeStream, relativePosition);
    const int HRTF_DATASET_INDEX = 1;

    if (!streamToAdd.lastPopSucceeded()) {
        bool forceSilentBlock = true;

        if (!streamToAdd.getLastPopOutput().isNull()) {
            bool isInjector = dynamic_cast<const InjectedAudioStream*>(&streamToAdd);

            // in an injector, just go silent - the injector has likely ended
            // in other inputs (microphone, &c.), repeat with fade to avoid the harsh jump to silence
            if (!isInjector) {
                // calculate its fade factor, which depends on how many times it's already been repeated.
                float fadeFactor = calculateRepeatedFrameFadeFactor(streamToAdd.getConsecutiveNotMixedCount() - 1);
                if (fadeFactor > 0.0f) {
                    // apply the fadeFactor to the gain
                    gain *= fadeFactor;
                    forceSilentBlock = false;
                }
            }
        }

        if (forceSilentBlock) {
            // call renderSilent with a forced silent block to reduce artifacts
            // (this is not done for stereo streams since they do not go through the HRTF)
            if (!streamToAdd.isStereo() && !isEcho) {
                // get the existing listener-source HRTF object, or create a new one
                auto& hrtf = listenerNodeData.hrtfForStream(sourceNodeID, streamToAdd.getStreamIdentifier());

                static int16_t silentMonoBlock[AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL] = {};
                hrtf.renderSilent(silentMonoBlock, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                                  AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

                ++stats.hrtfSilentRenders;
            }

            return;
        }
    }

    // grab the stream from the ring buffer
    AudioRingBuffer::ConstIterator streamPopOutput = streamToAdd.getLastPopOutput();

    // stereo sources are not passed through HRTF
    if (streamToAdd.isStereo()) {
        for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; ++i) {
            _mixSamples[i] += float(streamPopOutput[i] * gain / AudioConstants::MAX_SAMPLE_VALUE);
        }

        ++stats.manualStereoMixes;
        return;
    }

    // echo sources are not passed through HRTF
    if (isEcho) {
        for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; i += 2) {
            auto monoSample = float(streamPopOutput[i / 2] * gain / AudioConstants::MAX_SAMPLE_VALUE);
            _mixSamples[i] += monoSample;
            _mixSamples[i + 1] += monoSample;
        }

        ++stats.manualEchoMixes;
        return;
    }

    // get the existing listener-source HRTF object, or create a new one
    auto& hrtf = listenerNodeData.hrtfForStream(sourceNodeID, streamToAdd.getStreamIdentifier());

    streamPopOutput.readSamples(_bufferSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

    if (streamToAdd.getLastPopOutputLoudness() == 0.0f) {
        // call renderSilent to reduce artifacts
        hrtf.renderSilent(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                          AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

        ++stats.hrtfSilentRenders;
        return;
    }

    if (throttle) {
        // call renderSilent with actual frame data and a gain of 0.0f to reduce artifacts
        hrtf.renderSilent(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, 0.0f,
                          AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

        ++stats.hrtfThrottleRenders;
        return;
    }

    hrtf.render(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

    ++stats.hrtfRenders;
}
Example 11
bool AudioMixerSlave::prepareMix(const SharedNodePointer& listener) {
    AvatarAudioStream* listenerAudioStream = static_cast<AudioMixerClientData*>(listener->getLinkedData())->getAvatarAudioStream();
    AudioMixerClientData* listenerData = static_cast<AudioMixerClientData*>(listener->getLinkedData());

    // if we received an invalid position from this listener, then refuse to make them a mix
    // because we don't know how to do it properly
    if (!listenerAudioStream->hasValidPosition()) {
        return false;
    }

    // zero out the mix for this listener
    memset(_mixSamples, 0, sizeof(_mixSamples));

    bool isThrottling = _throttlingRatio > 0.0f;
    std::vector<std::pair<float, SharedNodePointer>> throttledNodes;

    typedef void (AudioMixerSlave::*MixFunctor)(
            AudioMixerClientData&, const QUuid&, const AvatarAudioStream&, const PositionalAudioStream&);
    auto forAllStreams = [&](const SharedNodePointer& node, AudioMixerClientData* nodeData, MixFunctor mixFunctor) {
        auto nodeID = node->getUUID();
        for (auto& streamPair : nodeData->getAudioStreams()) {
            auto nodeStream = streamPair.second;
            (this->*mixFunctor)(*listenerData, nodeID, *listenerAudioStream, *nodeStream);
        }
    };

#ifdef HIFI_AUDIO_MIXER_DEBUG
    auto mixStart = p_high_resolution_clock::now();
#endif

    std::for_each(_begin, _end, [&](const SharedNodePointer& node) {
        AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
        if (!nodeData) {
            return;
        }

        if (*node == *listener) {
            // only mix the echo, if requested
            for (auto& streamPair : nodeData->getAudioStreams()) {
                auto nodeStream = streamPair.second;
                if (nodeStream->shouldLoopbackForNode()) {
                    mixStream(*listenerData, node->getUUID(), *listenerAudioStream, *nodeStream);
                }
            }
        } else if (!listenerData->shouldIgnore(listener, node, _frame)) {
            if (!isThrottling) {
                forAllStreams(node, nodeData, &AudioMixerSlave::mixStream);
            } else {
                auto nodeID = node->getUUID();

                // compute the node's max relative volume
                float nodeVolume = 0.0f;
                for (auto& streamPair : nodeData->getAudioStreams()) {
                    auto nodeStream = streamPair.second;

                    // approximate the gain
                    glm::vec3 relativePosition = nodeStream->getPosition() - listenerAudioStream->getPosition();
                    float gain = approximateGain(*listenerAudioStream, *nodeStream, relativePosition);

                    // modify by hrtf gain adjustment
                    auto& hrtf = listenerData->hrtfForStream(nodeID, nodeStream->getStreamIdentifier());
                    gain *= hrtf.getGainAdjustment();

                    auto streamVolume = nodeStream->getLastPopOutputTrailingLoudness() * gain;
                    nodeVolume = std::max(streamVolume, nodeVolume);
                }

                // max-heapify the nodes by relative volume
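                // (std::pair compares on its first element, so the heap is keyed on nodeVolume
                //  and the loudest remaining node surfaces at the front)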
                throttledNodes.push_back({ nodeVolume, node });
                std::push_heap(throttledNodes.begin(), throttledNodes.end());
            }
        }
    });

    if (isThrottling) {
        // pop the loudest nodes off the heap and mix their streams
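        // numToRetain keeps (1 - _throttlingRatio) of the nodes, e.g. a ratio of 0.25f mixes
        // roughly the loudest 75% of nodes normally and throttles the rest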
        int numToRetain = (int)(std::distance(_begin, _end) * (1 - _throttlingRatio));
        for (int i = 0; i < numToRetain; i++) {
            if (throttledNodes.empty()) {
                break;
            }

            std::pop_heap(throttledNodes.begin(), throttledNodes.end());

            auto& node = throttledNodes.back().second;
            AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
            forAllStreams(node, nodeData, &AudioMixerSlave::mixStream);

            throttledNodes.pop_back();
        }

        // throttle the remaining nodes' streams
        for (const std::pair<float, SharedNodePointer>& nodePair : throttledNodes) {
            auto& node = nodePair.second;
            AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
            forAllStreams(node, nodeData, &AudioMixerSlave::throttleStream);
        }
    }

#ifdef HIFI_AUDIO_MIXER_DEBUG
    auto mixEnd = p_high_resolution_clock::now();
    auto mixTime = std::chrono::duration_cast<std::chrono::nanoseconds>(mixEnd - mixStart);
    stats.mixTime += mixTime.count();
#endif

    // check for silent audio before limiting
    // limiting uses a dither and can only guarantee abs(sample) <= 1
    bool hasAudio = false;
    for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; ++i) {
        if (_mixSamples[i] != 0.0f) {
            hasAudio = true;
            break;
        }
    }

    // use the per listener AudioLimiter to render the mixed data
    listenerData->audioLimiter.render(_mixSamples, _bufferSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

    return hasAudio;
}
Example 12
QJsonObject AudioMixerClientData::getAudioStreamStats() {
    QJsonObject result;

    QJsonObject downstreamStats;
    AudioStreamStats streamStats = _downstreamAudioStreamStats;
    downstreamStats["desired"] = streamStats._desiredJitterBufferFrames;
    downstreamStats["available_avg_10s"] = streamStats._framesAvailableAverage;
    downstreamStats["available"] = (double) streamStats._framesAvailable;
    downstreamStats["unplayed"] = (double) streamStats._unplayedMs;
    downstreamStats["starves"] = (double) streamStats._starveCount;
    downstreamStats["not_mixed"] = (double) streamStats._consecutiveNotMixedCount;
    downstreamStats["overflows"] = (double) streamStats._overflowCount;
    downstreamStats["lost%"] = streamStats._packetStreamStats.getLostRate() * 100.0f;
    downstreamStats["lost%_30s"] = streamStats._packetStreamWindowStats.getLostRate() * 100.0f;
    downstreamStats["min_gap"] = formatUsecTime(streamStats._timeGapMin);
    downstreamStats["max_gap"] = formatUsecTime(streamStats._timeGapMax);
    downstreamStats["avg_gap"] = formatUsecTime(streamStats._timeGapAverage);
    downstreamStats["min_gap_30s"] = formatUsecTime(streamStats._timeGapWindowMin);
    downstreamStats["max_gap_30s"] = formatUsecTime(streamStats._timeGapWindowMax);
    downstreamStats["avg_gap_30s"] = formatUsecTime(streamStats._timeGapWindowAverage);

    result["downstream"] = downstreamStats;

    AvatarAudioStream* avatarAudioStream = getAvatarAudioStream();

    if (avatarAudioStream) {
        QJsonObject upstreamStats;

        AudioStreamStats streamStats = avatarAudioStream->getAudioStreamStats();
        upstreamStats["mic.desired"] = streamStats._desiredJitterBufferFrames;
        upstreamStats["desired_calc"] = avatarAudioStream->getCalculatedJitterBufferFrames();
        upstreamStats["available_avg_10s"] = streamStats._framesAvailableAverage;
        upstreamStats["available"] = (double) streamStats._framesAvailable;
        upstreamStats["unplayed"] = (double) streamStats._unplayedMs;
        upstreamStats["starves"] = (double) streamStats._starveCount;
        upstreamStats["not_mixed"] = (double) streamStats._consecutiveNotMixedCount;
        upstreamStats["overflows"] = (double) streamStats._overflowCount;
        upstreamStats["silents_dropped"] = (double) streamStats._framesDropped;
        upstreamStats["lost%"] = streamStats._packetStreamStats.getLostRate() * 100.0f;
        upstreamStats["lost%_30s"] = streamStats._packetStreamWindowStats.getLostRate() * 100.0f;
        upstreamStats["min_gap"] = formatUsecTime(streamStats._timeGapMin);
        upstreamStats["max_gap"] = formatUsecTime(streamStats._timeGapMax);
        upstreamStats["avg_gap"] = formatUsecTime(streamStats._timeGapAverage);
        upstreamStats["min_gap_30s"] = formatUsecTime(streamStats._timeGapWindowMin);
        upstreamStats["max_gap_30s"] = formatUsecTime(streamStats._timeGapWindowMax);
        upstreamStats["avg_gap_30s"] = formatUsecTime(streamStats._timeGapWindowAverage);

        result["upstream"] = upstreamStats;
    } else {
        result["upstream"] = "mic unknown";
    }

    QJsonArray injectorArray;
    auto streamsCopy = getAudioStreams();
    for (auto& injectorPair : streamsCopy) {
        if (injectorPair.second->getType() == PositionalAudioStream::Injector) {
            QJsonObject upstreamStats;

            AudioStreamStats streamStats = injectorPair.second->getAudioStreamStats();
            upstreamStats["inj.desired"]  = streamStats._desiredJitterBufferFrames;
            upstreamStats["desired_calc"] = injectorPair.second->getCalculatedJitterBufferFrames();
            upstreamStats["available_avg_10s"] = streamStats._framesAvailableAverage;
            upstreamStats["available"] = (double) streamStats._framesAvailable;
            upstreamStats["unplayed"] = (double) streamStats._unplayedMs;
            upstreamStats["starves"] = (double) streamStats._starveCount;
            upstreamStats["not_mixed"] = (double) streamStats._consecutiveNotMixedCount;
            upstreamStats["overflows"] = (double) streamStats._overflowCount;
            upstreamStats["silents_dropped"] = (double) streamStats._framesDropped;
            upstreamStats["lost%"] = streamStats._packetStreamStats.getLostRate() * 100.0f;
            upstreamStats["lost%_30s"] = streamStats._packetStreamWindowStats.getLostRate() * 100.0f;
            upstreamStats["min_gap"] = formatUsecTime(streamStats._timeGapMin);
            upstreamStats["max_gap"] = formatUsecTime(streamStats._timeGapMax);
            upstreamStats["avg_gap"] = formatUsecTime(streamStats._timeGapAverage);
            upstreamStats["min_gap_30s"] = formatUsecTime(streamStats._timeGapWindowMin);
            upstreamStats["max_gap_30s"] = formatUsecTime(streamStats._timeGapWindowMax);
            upstreamStats["avg_gap_30s"] = formatUsecTime(streamStats._timeGapWindowAverage);

            injectorArray.push_back(upstreamStats);
        }
    }

    result["injectors"] = injectorArray;

    return result;
}
Example 13
float computeGain(float masterListenerGain, const AvatarAudioStream& listeningNodeStream,
        const PositionalAudioStream& streamToAdd, const glm::vec3& relativePosition, float distance, bool isEcho) {
    float gain = 1.0f;

    // injector: apply attenuation
    if (streamToAdd.getType() == PositionalAudioStream::Injector) {
        gain *= reinterpret_cast<const InjectedAudioStream*>(&streamToAdd)->getAttenuationRatio();

    // avatar: apply fixed off-axis attenuation to make them quieter as they turn away
    } else if (!isEcho && (streamToAdd.getType() == PositionalAudioStream::Microphone)) {
        glm::vec3 rotatedListenerPosition = glm::inverse(streamToAdd.getOrientation()) * relativePosition;

        // source directivity is based on angle of emission, in local coordinates
        glm::vec3 direction = glm::normalize(rotatedListenerPosition);
        float angleOfDelivery = fastAcosf(glm::clamp(-direction.z, -1.0f, 1.0f));   // UNIT_NEG_Z is "forward"

        const float MAX_OFF_AXIS_ATTENUATION = 0.2f;
        const float OFF_AXIS_ATTENUATION_STEP = (1 - MAX_OFF_AXIS_ATTENUATION) / 2.0f;
        float offAxisCoefficient = MAX_OFF_AXIS_ATTENUATION + (angleOfDelivery * (OFF_AXIS_ATTENUATION_STEP / PI_OVER_TWO));

        gain *= offAxisCoefficient;

        // apply master gain, only to avatars
        gain *= masterListenerGain;
    }

    auto& audioZones = AudioMixer::getAudioZones();
    auto& zoneSettings = AudioMixer::getZoneSettings();

    // find distance attenuation coefficient
    float attenuationPerDoublingInDistance = AudioMixer::getAttenuationPerDoublingInDistance();
    for (const auto& settings : zoneSettings) {
        if (audioZones[settings.source].area.contains(streamToAdd.getPosition()) &&
            audioZones[settings.listener].area.contains(listeningNodeStream.getPosition())) {
            attenuationPerDoublingInDistance = settings.coefficient;
            break;
        }
    }

    if (attenuationPerDoublingInDistance < 0.0f) {
        // translate a negative zone setting to distance limit
        const float MIN_DISTANCE_LIMIT = ATTN_DISTANCE_REF + 1.0f;  // silent after 1m
        float distanceLimit = std::max(-attenuationPerDoublingInDistance, MIN_DISTANCE_LIMIT);

        // calculate the LINEAR attenuation using the distance to this node
        // reference attenuation of 0dB at distance = ATTN_DISTANCE_REF
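        // (the factor falls linearly from 1.0 at distance == ATTN_DISTANCE_REF to 0.0 at distance == distanceLimit)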
        float d = distance - ATTN_DISTANCE_REF;
        gain *= std::max(1.0f - d / (distanceLimit - ATTN_DISTANCE_REF), 0.0f);
        gain = std::min(gain, ATTN_GAIN_MAX);

    } else {
        // translate a positive zone setting to gain per log2(distance)
        const float MIN_ATTENUATION_COEFFICIENT = 0.001f;   // -60dB per log2(distance)
        float g = glm::clamp(1.0f - attenuationPerDoublingInDistance, MIN_ATTENUATION_COEFFICIENT, 1.0f);

        // calculate the LOGARITHMIC attenuation using the distance to this node
        // reference attenuation of 0dB at distance = ATTN_DISTANCE_REF
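        // when d < 1.0f (source closer than the reference distance) both log2 terms are non-positive,
        // so this factor is >= 1.0; the clamp to ATTN_GAIN_MAX below bounds that near-field boost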
        float d = (1.0f / ATTN_DISTANCE_REF) * std::max(distance, HRTF_NEARFIELD_MIN);
        gain *= fastExp2f(fastLog2f(g) * fastLog2f(d));
        gain = std::min(gain, ATTN_GAIN_MAX);
    }

    return gain;
}
Example 14
void AudioMixerSlave::addStream(AudioMixerClientData::MixableStream& mixableStream,
                                AvatarAudioStream& listeningNodeStream,
                                float masterListenerGain, bool isSoloing) {
    ++stats.totalMixes;

    auto streamToAdd = mixableStream.positionalStream;

    // check if this is a server echo of a source back to itself
    bool isEcho = (streamToAdd == &listeningNodeStream);

    glm::vec3 relativePosition = streamToAdd->getPosition() - listeningNodeStream.getPosition();

    float distance = glm::max(glm::length(relativePosition), EPSILON);
    float azimuth = isEcho ? 0.0f : computeAzimuth(listeningNodeStream, listeningNodeStream, relativePosition);

    float gain = masterListenerGain;
    if (!isSoloing) {
        gain = computeGain(masterListenerGain, listeningNodeStream, *streamToAdd, relativePosition, distance, isEcho);
    }

    const int HRTF_DATASET_INDEX = 1;

    if (!streamToAdd->lastPopSucceeded()) {
        bool forceSilentBlock = true;

        if (!streamToAdd->getLastPopOutput().isNull()) {
            bool isInjector = dynamic_cast<const InjectedAudioStream*>(streamToAdd);

            // in an injector, just go silent - the injector has likely ended
            // in other inputs (microphone, &c.), repeat with fade to avoid the harsh jump to silence
            if (!isInjector) {
                // calculate its fade factor, which depends on how many times it's already been repeated.
                float fadeFactor = calculateRepeatedFrameFadeFactor(streamToAdd->getConsecutiveNotMixedCount() - 1);
                if (fadeFactor > 0.0f) {
                    // apply the fadeFactor to the gain
                    gain *= fadeFactor;
                    forceSilentBlock = false;
                }
            }
        }

        if (forceSilentBlock) {
            // call renderSilent with a forced silent block to reduce artifacts
            // (this is not done for stereo streams since they do not go through the HRTF)
            if (!streamToAdd->isStereo() && !isEcho) {
                static int16_t silentMonoBlock[AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL] = {};
                mixableStream.hrtf->render(silentMonoBlock, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                                           AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

                ++stats.hrtfRenders;
            }

            return;
        }
    }

    // grab the stream from the ring buffer
    AudioRingBuffer::ConstIterator streamPopOutput = streamToAdd->getLastPopOutput();

    // stereo sources are not passed through HRTF
    if (streamToAdd->isStereo()) {

        // apply the avatar gain adjustment
        gain *= mixableStream.hrtf->getGainAdjustment();

        const float scale = 1 / 32768.0f; // int16_t to float

        for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL; i++) {
            _mixSamples[2*i+0] += (float)streamPopOutput[2*i+0] * gain * scale;
            _mixSamples[2*i+1] += (float)streamPopOutput[2*i+1] * gain * scale;
        }

        ++stats.manualStereoMixes;
    } else if (isEcho) {
        // echo sources are not passed through HRTF

        const float scale = 1/32768.0f; // int16_t to float

        for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL; i++) {
            float sample = (float)streamPopOutput[i] * gain * scale;
            _mixSamples[2*i+0] += sample;
            _mixSamples[2*i+1] += sample;
        }

        ++stats.manualEchoMixes;
    } else {
        streamPopOutput.readSamples(_bufferSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

        mixableStream.hrtf->render(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                                   AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

        ++stats.hrtfRenders;
    }
}
Example 15
void AudioMixer::sendAudioEnvironmentPacket(SharedNodePointer node) {
    // Send stream properties
    bool hasReverb = false;
    float reverbTime, wetLevel;
    // find reverb properties
    for (int i = 0; i < _zoneReverbSettings.size(); ++i) {
        AudioMixerClientData* data = static_cast<AudioMixerClientData*>(node->getLinkedData());
        glm::vec3 streamPosition = data->getAvatarAudioStream()->getPosition();
        AABox box = _audioZones[_zoneReverbSettings[i].zone];
        if (box.contains(streamPosition)) {
            hasReverb = true;
            reverbTime = _zoneReverbSettings[i].reverbTime;
            wetLevel = _zoneReverbSettings[i].wetLevel;

            // Modulate wet level with distance to wall
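            // (the wet level drops by up to 12 dB (MAX_ATTENUATION) as the stream comes within
            //  MIN_ATTENUATION_DISTANCE of the nearest wall on the X or Z axis)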
            float MIN_ATTENUATION_DISTANCE = 2.0f;
            float MAX_ATTENUATION = -12; // dB
            glm::vec3 distanceToWalls = (box.getDimensions() / 2.0f) - glm::abs(streamPosition - box.calcCenter());
            float distanceToClosestWall = glm::min(distanceToWalls.x, distanceToWalls.z);
            if (distanceToClosestWall < MIN_ATTENUATION_DISTANCE) {
                wetLevel += MAX_ATTENUATION * (1.0f - distanceToClosestWall / MIN_ATTENUATION_DISTANCE);
            }
            break;
        }
    }
    
    AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
    AvatarAudioStream* stream = nodeData->getAvatarAudioStream();
    bool dataChanged = (stream->hasReverb() != hasReverb) ||
    (stream->hasReverb() && (stream->getRevebTime() != reverbTime ||
                             stream->getWetLevel() != wetLevel));
    if (dataChanged) {
        // Update stream
        if (hasReverb) {
            stream->setReverb(reverbTime, wetLevel);
        } else {
            stream->clearReverb();
        }
    }

    // Send at change or every so often
    float CHANCE_OF_SEND = 0.01f;
    bool sendData = dataChanged || (randFloat() < CHANCE_OF_SEND);

    if (sendData) {
        auto nodeList = DependencyManager::get<NodeList>();

        unsigned char bitset = 0;

        int packetSize = sizeof(bitset);

        if (hasReverb) {
            packetSize += sizeof(reverbTime) + sizeof(wetLevel);
        }

        auto envPacket = NLPacket::create(PacketType::AudioEnvironment, packetSize);

        if (hasReverb) {
            setAtBit(bitset, HAS_REVERB_BIT);
        }

        envPacket->writePrimitive(bitset);

        if (hasReverb) {
            envPacket->writePrimitive(reverbTime);
            envPacket->writePrimitive(wetLevel);
        }
        nodeList->sendPacket(std::move(envPacket), *node);
    }
}
Example 16
void AudioMixer::addStreamToMixForListeningNodeWithStream(AudioMixerClientData& listenerNodeData,
                                                          const PositionalAudioStream& streamToAdd,
                                                          const QUuid& sourceNodeID,
                                                          const AvatarAudioStream& listeningNodeStream) {


    // to reduce artifacts we calculate the gain and azimuth for every source for this listener
    // even if we are not going to end up mixing in this source

    ++_totalMixes;

    // this ensures that the tail of any previously mixed audio or the first block of new audio sounds correct

    // check if this is a server echo of a source back to itself
    bool isEcho = (&streamToAdd == &listeningNodeStream);

    glm::vec3 relativePosition = streamToAdd.getPosition() - listeningNodeStream.getPosition();

    // figure out the distance between source and listener
    float distance = glm::max(glm::length(relativePosition), EPSILON);

    // figure out the gain for this source at the listener
    float gain = gainForSource(streamToAdd, listeningNodeStream, relativePosition, isEcho);

    // figure out the azimuth to this source at the listener
    float azimuth = isEcho ? 0.0f : azimuthForSource(streamToAdd, listeningNodeStream, relativePosition);

    float repeatedFrameFadeFactor = 1.0f;

    static const int HRTF_DATASET_INDEX = 1;

    if (!streamToAdd.lastPopSucceeded()) {
        bool forceSilentBlock = true;

        if (_streamSettings._repetitionWithFade && !streamToAdd.getLastPopOutput().isNull()) {

            // repetition with fade is enabled, and we do have a valid previous frame to repeat
            // so we mix the previously-mixed block

            // this is preferable to not mixing it at all to avoid the harsh jump to silence

            // we'll repeat the last block until the stream has a new block to mix
            // and we'll gradually fade that repeated block into silence.

            // calculate its fade factor, which depends on how many times it's already been repeated.

            repeatedFrameFadeFactor = calculateRepeatedFrameFadeFactor(streamToAdd.getConsecutiveNotMixedCount() - 1);
            if (repeatedFrameFadeFactor > 0.0f) {
                // apply the repeatedFrameFadeFactor to the gain
                gain *= repeatedFrameFadeFactor;

                forceSilentBlock = false;
            }
        }

        if (forceSilentBlock) {
            // we're deciding not to repeat, either because we've already repeated enough times or because repetition with fade is disabled
            // in this case we will call renderSilent with a forced silent block
            // this ensures the correct tail from the previously mixed block and the correct spatialization of first block
            // of any upcoming audio

            if (!streamToAdd.isStereo() && !isEcho) {
                // get the existing listener-source HRTF object, or create a new one
                auto& hrtf = listenerNodeData.hrtfForStream(sourceNodeID, streamToAdd.getStreamIdentifier());

                // this is not done for stereo streams since they do not go through the HRTF
                static int16_t silentMonoBlock[AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL] = {};
                hrtf.renderSilent(silentMonoBlock, _mixedSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                                  AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

                ++_hrtfSilentRenders;
            }

            return;
        }
    }

    // grab the stream from the ring buffer
    AudioRingBuffer::ConstIterator streamPopOutput = streamToAdd.getLastPopOutput();

    if (streamToAdd.isStereo() || isEcho) {
        // this is a stereo source or server echo so we do not pass it through the HRTF
        // simply apply our calculated gain to each sample
        if (streamToAdd.isStereo()) {
            for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; ++i) {
                _mixedSamples[i] += float(streamPopOutput[i] * gain / AudioConstants::MAX_SAMPLE_VALUE);
            }

            ++_manualStereoMixes;
        } else {
            for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; i += 2) {
                auto monoSample = float(streamPopOutput[i / 2] * gain / AudioConstants::MAX_SAMPLE_VALUE);
                _mixedSamples[i] += monoSample;
                _mixedSamples[i + 1] += monoSample;
            }

            ++_manualEchoMixes;
        }

        return;
    }

    // get the existing listener-source HRTF object, or create a new one
    auto& hrtf = listenerNodeData.hrtfForStream(sourceNodeID, streamToAdd.getStreamIdentifier());

    static int16_t streamBlock[AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL];

    streamPopOutput.readSamples(streamBlock, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

    // if the frame we're about to mix is silent, simply call render silent and move on
    if (streamToAdd.getLastPopOutputLoudness() == 0.0f) {
        // silent frame from source

        // we still need to call renderSilent via the HRTF for a mono source
        hrtf.renderSilent(streamBlock, _mixedSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                          AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

        ++_hrtfSilentRenders;

        return;
    }

    if (_performanceThrottlingRatio > 0.0f
        && streamToAdd.getLastPopOutputTrailingLoudness() / glm::length(relativePosition) <= _minAudibilityThreshold) {
        // the mixer is struggling so we're going to drop off some streams

        // we call renderSilent via the HRTF with the actual frame data and a gain of 0.0
        hrtf.renderSilent(streamBlock, _mixedSamples, HRTF_DATASET_INDEX, azimuth, distance, 0.0f,
                          AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

        ++_hrtfStruggleRenders;

        return;
    }

    ++_hrtfRenders;

    // mono stream, call the HRTF with our block and calculated azimuth and gain
    hrtf.render(streamBlock, _mixedSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
}