Example #1
float computeGain(const AudioMixerClientData& listenerNodeData, const AvatarAudioStream& listeningNodeStream,
        const PositionalAudioStream& streamToAdd, const glm::vec3& relativePosition, bool isEcho) {
    float gain = 1.0f;

    // injector: apply attenuation
    if (streamToAdd.getType() == PositionalAudioStream::Injector) {
        gain *= static_cast<const InjectedAudioStream*>(&streamToAdd)->getAttenuationRatio();

    // avatar: apply fixed off-axis attenuation to make avatars quieter as they turn away
    } else if (!isEcho && (streamToAdd.getType() == PositionalAudioStream::Microphone)) {
        glm::vec3 rotatedListenerPosition = glm::inverse(streamToAdd.getOrientation()) * relativePosition;

        // source directivity is based on angle of emission, in local coordinates
        glm::vec3 direction = glm::normalize(rotatedListenerPosition);
        float angleOfDelivery = fastAcosf(glm::clamp(-direction.z, -1.0f, 1.0f));   // UNIT_NEG_Z is "forward"

        const float MAX_OFF_AXIS_ATTENUATION = 0.2f;
        const float OFF_AXIS_ATTENUATION_STEP = (1.0f - MAX_OFF_AXIS_ATTENUATION) / 2.0f;
        float offAxisCoefficient = MAX_OFF_AXIS_ATTENUATION + (angleOfDelivery * (OFF_AXIS_ATTENUATION_STEP / PI_OVER_TWO));
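        // linear ramp on the emission angle: 0.2 at 0 rad, 0.6 at PI/2, up to 1.0 at PI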

        gain *= offAxisCoefficient;

        // apply master gain, only to avatars
        gain *= listenerNodeData.getMasterAvatarGain();
    }

    auto& audioZones = AudioMixer::getAudioZones();
    auto& zoneSettings = AudioMixer::getZoneSettings();

    // find distance attenuation coefficient
    float attenuationPerDoublingInDistance = AudioMixer::getAttenuationPerDoublingInDistance();
    for (int i = 0; i < zoneSettings.length(); ++i) {
        if (audioZones[zoneSettings[i].source].contains(streamToAdd.getPosition()) &&
            audioZones[zoneSettings[i].listener].contains(listeningNodeStream.getPosition())) {
            attenuationPerDoublingInDistance = zoneSettings[i].coefficient;
            break;
        }
    }

    // distance attenuation
    const float ATTENUATION_START_DISTANCE = 1.0f;
    float distance = glm::length(relativePosition);
    assert(ATTENUATION_START_DISTANCE > EPSILON);
    if (distance >= ATTENUATION_START_DISTANCE) {

        // translate the zone setting to gain per log2(distance)
        float g = 1.0f - attenuationPerDoublingInDistance;
        g = glm::clamp(g, EPSILON, 1.0f);

        // calculate the distance coefficient using the distance to this node
        float distanceCoefficient = fastExp2f(fastLog2f(g) * fastLog2f(distance/ATTENUATION_START_DISTANCE));

        // multiply the current attenuation coefficient by the distance coefficient
        gain *= distanceCoefficient;
    }

    return gain;
}
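
The distance roll-off above is a power law in disguise: exp2(log2(g) * log2(d / d0)) equals pow(d / d0, log2(g)), so with attenuationPerDoublingInDistance = 0.5 the gain halves each time the distance doubles. Below is a minimal standalone sketch of that behavior, substituting std::exp2/std::log2 for the mixer's fastExp2f/fastLog2f helpers; ATTENUATION_START_DISTANCE and the clamping mirror the function above, while everything else is illustrative rather than part of the mixer.

#include <cmath>
#include <cstdio>

// Sketch of computeGain()'s distance roll-off with standard math functions in
// place of fastExp2f/fastLog2f. With g = 1 - attenuationPerDoublingInDistance,
// exp2(log2(g) * log2(d / d0)) is algebraically pow(d / d0, log2(g)).
static float distanceCoefficient(float distance, float attenuationPerDoublingInDistance) {
    const float ATTENUATION_START_DISTANCE = 1.0f;
    if (distance < ATTENUATION_START_DISTANCE) {
        return 1.0f;   // no roll-off inside the start distance
    }
    float g = std::fmax(1.0f - attenuationPerDoublingInDistance, 1.0e-6f);
    return std::exp2(std::log2(g) * std::log2(distance / ATTENUATION_START_DISTANCE));
}

int main() {
    // attenuationPerDoublingInDistance = 0.5 -> log2(g) = -1 -> coefficient = d0 / d:
    // prints 1.000 0.500 0.250 for distances of 1 m, 2 m, and 4 m
    std::printf("%.3f %.3f %.3f\n",
                distanceCoefficient(1.0f, 0.5f),
                distanceCoefficient(2.0f, 0.5f),
                distanceCoefficient(4.0f, 0.5f));
    return 0;
}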
Example #2
bool AudioMixerSlave::prepareMix(const SharedNodePointer& listener) {
    AudioMixerClientData* listenerData = static_cast<AudioMixerClientData*>(listener->getLinkedData());
    AvatarAudioStream* listenerAudioStream = listenerData->getAvatarAudioStream();

#ifdef HIFI_AUDIO_MIXER_DEBUG
    // start of the mix timing window consumed at the bottom of this function
    auto mixStart = p_high_resolution_clock::now();
#endif

    // zero out the mix for this listener
    memset(_mixSamples, 0, sizeof(_mixSamples));

    bool isThrottling = _numToRetain != -1;
    bool isSoloing = !listenerData->getSoloedNodes().empty();

    auto& streams = listenerData->getStreams();

    addStreams(*listener, *listenerData);
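
    // Each pass below re-buckets the streams: "skipped" streams are excluded from
    // the mix, "inactive" streams are currently silent, and "active" streams are
    // rendered into _mixSamples via addStream().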

    // Process skipped streams
    erase_if(streams.skipped, [&](MixableStream& stream) {
        if (shouldBeRemoved(stream, _sharedData)) {
            return true;
        }

        if (!shouldBeSkipped(stream, *listener, *listenerAudioStream, *listenerData)) {
            if (shouldBeInactive(stream)) {
                streams.inactive.push_back(move(stream));
                ++stats.skippedToInactive;
            } else {
                streams.active.push_back(move(stream));
                ++stats.skippedToActive;
            }
            return true;
        }

        if (!isThrottling) {
            updateHRTFParameters(stream, *listenerAudioStream,
                                 listenerData->getMasterAvatarGain());
        }
        return false;
    });

    // Process inactive streams
    erase_if(streams.inactive, [&](MixableStream& stream) {
        if (shouldBeRemoved(stream, _sharedData)) {
            return true;
        }

        if (shouldBeSkipped(stream, *listener, *listenerAudioStream, *listenerData)) {
            streams.skipped.push_back(move(stream));
            ++stats.inactiveToSkipped;
            return true;
        }

        if (!shouldBeInactive(stream)) {
            streams.active.push_back(move(stream));
            ++stats.inactiveToActive;
            return true;
        }

        if (!isThrottling) {
            updateHRTFParameters(stream, *listenerAudioStream,
                                 listenerData->getMasterAvatarGain());
        }
        return false;
    });

    // Process active streams
    erase_if(streams.active, [&](MixableStream& stream) {
        if (shouldBeRemoved(stream, _sharedData)) {
            return true;
        }

        if (isThrottling) {
            // we're throttling, so we need to update the approximate volume for any un-skipped streams
            // unless this is simply for an echo (in which case the approx volume is 1.0)
            stream.approximateVolume = approximateVolume(stream, listenerAudioStream);
        } else {
            if (shouldBeSkipped(stream, *listener, *listenerAudioStream, *listenerData)) {
                addStream(stream, *listenerAudioStream, 0.0f, isSoloing);
                streams.skipped.push_back(move(stream));
                ++stats.activeToSkipped;
                return true;
            }

            addStream(stream, *listenerAudioStream, listenerData->getMasterAvatarGain(),
                      isSoloing);

            if (shouldBeInactive(stream)) {
                // To reduce artifacts we still call render to flush the HRTF for every
                // silent source on the first frame where it becomes silent;
                // this ensures the correct tail from the last mixed block
                streams.inactive.push_back(move(stream));
                ++stats.activeToInactive;
                return true;
            }
        }

        return false;
    });

    if (isThrottling) {
        // since we're throttling, we need to partition the mixable streams into throttled and unthrottled sets
        int numToRetain = min(_numToRetain, (int)streams.active.size()); // Make sure we don't overflow
        auto throttlePoint = begin(streams.active) + numToRetain;

        std::nth_element(streams.active.begin(), throttlePoint, streams.active.end(),
                         [](const auto& a, const auto& b)
                         {
                             return a.approximateVolume > b.approximateVolume;
                         });
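
        // the numToRetain loudest streams now sit before throttlePoint (in no particular
        // order); everything at or after throttlePoint gets throttled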

        SegmentedEraseIf<MixableStreamsVector> erase(streams.active);
        erase.iterateTo(throttlePoint, [&](MixableStream& stream) {
            if (shouldBeSkipped(stream, *listener, *listenerAudioStream, *listenerData)) {
                resetHRTFState(stream);
                streams.skipped.push_back(move(stream));
                ++stats.activeToSkipped;
                return true;
            }

            addStream(stream, *listenerAudioStream, listenerData->getMasterAvatarGain(),
                      isSoloing);

            if (shouldBeInactive(stream)) {
                // To reduce artifacts we still call render to flush the HRTF for every
                // silent source on the first frame where it becomes silent;
                // this ensures the correct tail from the last mixed block
                streams.inactive.push_back(move(stream));
                ++stats.activeToInactive;
                return true;
            }

            return false;
        });
        erase.iterateTo(end(streams.active), [&](MixableStream& stream) {
            // To reduce artifacts we reset the HRTF state for every throttled source
            // on the first frame where it becomes throttled; this at least removes the
            // tail from the last mixed block, preventing excessive artifacts on the
            // next first block
            resetHRTFState(stream);

            if (shouldBeSkipped(stream, *listener, *listenerAudioStream, *listenerData)) {
                streams.skipped.push_back(move(stream));
                ++stats.activeToSkipped;
                return true;
            }

            if (shouldBeInactive(stream)) {
                streams.inactive.push_back(move(stream));
                ++stats.activeToInactive;
                return true;
            }

            return false;
        });
    }

    stats.skipped += (int)streams.skipped.size();
    stats.inactive += (int)streams.inactive.size();
    stats.active += (int)streams.active.size();

    // clear the newly ignored, un-ignored, ignoring, and un-ignoring streams now that we've processed them
    listenerData->clearStagedIgnoreChanges();

#ifdef HIFI_AUDIO_MIXER_DEBUG
    auto mixEnd = p_high_resolution_clock::now();
    auto mixTime = std::chrono::duration_cast<std::chrono::nanoseconds>(mixEnd - mixStart);
    stats.mixTime += mixTime.count();
#endif

    // check for silent audio before limiting
    // limiting uses a dither and can only guarantee abs(sample) <= 1
    bool hasAudio = false;
    for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; ++i) {
        if (_mixSamples[i] != 0.0f) {
            hasAudio = true;
            break;
        }
    }

    // use the per listener AudioLimiter to render the mixed data
    listenerData->audioLimiter.render(_mixSamples, _bufferSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

    return hasAudio;
}
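
prepareMix() leans on an erase_if helper that visits each stream in order, lets the callback move the stream into another bucket, and erases every element for which the callback returns true. That helper (and the related SegmentedEraseIf) is defined elsewhere in the mixer; the sketch below is only an assumption about its behavior, written against a plain std::vector.

#include <utility>
#include <vector>

// Assumed behavior of the erase_if helper used by prepareMix(): apply the
// callback to every element in order (the callback may move the element into
// another container), then erase the elements for which it returned true while
// preserving the relative order of the survivors.
template <typename Vector, typename Predicate>
void erase_if(Vector& container, Predicate&& shouldErase) {
    auto writeIt = container.begin();
    for (auto readIt = container.begin(); readIt != container.end(); ++readIt) {
        if (!shouldErase(*readIt)) {
            if (writeIt != readIt) {
                *writeIt = std::move(*readIt);   // compact the surviving elements
            }
            ++writeIt;
        }
    }
    container.erase(writeIt, container.end());
}

Under that assumption, a pass like the one over streams.skipped reduces to erase_if(streams.skipped, callback), where the callback returns true once push_back(move(stream)) has parked the stream in streams.inactive or streams.active.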