Example #1
void AudioMixerSlave::processPackets(const SharedNodePointer& node) {
    AudioMixerClientData* data = (AudioMixerClientData*)node->getLinkedData();
    if (data) {
        // process packets and collect the number of streams available for this frame
        stats.sumStreams += data->processPackets(_sharedData.addedStreams);
    }
}
Example #2
int AudioMixer::prepareMixForListeningNode(Node* node) {
    AvatarAudioStream* nodeAudioStream = ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioStream();
    
    // zero out the client mix for this node
    memset(_clientSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_STEREO);

    // loop through all other nodes that have sufficient audio to mix
    int streamsMixed = 0;
    foreach (const SharedNodePointer& otherNode, NodeList::getInstance()->getNodeHash()) {
        if (otherNode->getLinkedData()) {
            AudioMixerClientData* otherNodeClientData = (AudioMixerClientData*) otherNode->getLinkedData();

            // enumerate the ARBs attached to the otherNode and add all that should be added to mix

            const QHash<QUuid, PositionalAudioStream*>& otherNodeAudioStreams = otherNodeClientData->getAudioStreams();
            QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
            for (i = otherNodeAudioStreams.constBegin(); i != otherNodeAudioStreams.constEnd(); i++) {
                PositionalAudioStream* otherNodeStream = i.value();
                
                if (*otherNode != *node || otherNodeStream->shouldLoopbackForNode()) {
                    streamsMixed += addStreamToMixForListeningNodeWithStream(otherNodeStream, nodeAudioStream);
                }
            }
        }
    }
    return streamsMixed;
}
Example #3
void AudioMixer::prepareMixForListeningNode(Node* node) {
    AvatarAudioRingBuffer* nodeRingBuffer = ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioRingBuffer();

    // zero out the client mix for this node
    memset(_clientSamples, 0, sizeof(_clientSamples));

    // loop through all other nodes that have sufficient audio to mix
    foreach (const SharedNodePointer& otherNode, NodeList::getInstance()->getNodeHash()) {
        if (otherNode->getLinkedData()) {

            AudioMixerClientData* otherNodeClientData = (AudioMixerClientData*) otherNode->getLinkedData();

            // enumerate the ARBs attached to the otherNode and add all that should be added to mix
            for (unsigned int i = 0; i < otherNodeClientData->getRingBuffers().size(); i++) {
                PositionalAudioRingBuffer* otherNodeBuffer = otherNodeClientData->getRingBuffers()[i];

                if ((*otherNode != *node
                     || otherNodeBuffer->shouldLoopbackForNode())
                    && otherNodeBuffer->willBeAddedToMix()) {
                    addBufferToMixForListeningNodeWithBuffer(otherNodeBuffer, nodeRingBuffer);
                }
            }
        }
    }
}
Example #4
void AudioMixer::perSecondActions() {
    _sendAudioStreamStats = true;

    int callsLastSecond = _datagramsReadPerCallStats.getCurrentIntervalSamples();
    _readPendingCallsPerSecondStats.update(callsLastSecond);

    if (_printStreamStats) {

        printf("\n================================================================================\n\n");

        printf("            readPendingDatagram() calls per second | avg: %.2f, avg_30s: %.2f, last_second: %d\n",
            _readPendingCallsPerSecondStats.getAverage(),
            _readPendingCallsPerSecondStats.getWindowAverage(),
            callsLastSecond);

        printf("                           Datagrams read per call | avg: %.2f, avg_30s: %.2f, last_second: %.2f\n",
            _datagramsReadPerCallStats.getAverage(),
            _datagramsReadPerCallStats.getWindowAverage(),
            _datagramsReadPerCallStats.getCurrentIntervalAverage());

        printf("        Usecs spent per readPendingDatagram() call | avg: %.2f, avg_30s: %.2f, last_second: %.2f\n",
            _timeSpentPerCallStats.getAverage(),
            _timeSpentPerCallStats.getWindowAverage(),
            _timeSpentPerCallStats.getCurrentIntervalAverage());

        printf("  Usecs spent per packetVersionAndHashMatch() call | avg: %.2f, avg_30s: %.2f, last_second: %.2f\n",
            _timeSpentPerHashMatchCallStats.getAverage(),
            _timeSpentPerHashMatchCallStats.getWindowAverage(),
            _timeSpentPerHashMatchCallStats.getCurrentIntervalAverage());

        double WINDOW_LENGTH_USECS = READ_DATAGRAMS_STATS_WINDOW_SECONDS * USECS_PER_SECOND;

        printf("       %% time spent in readPendingDatagram() calls | avg_30s: %.6f%%, last_second: %.6f%%\n",
            _timeSpentPerCallStats.getWindowSum() / WINDOW_LENGTH_USECS * 100.0,
            _timeSpentPerCallStats.getCurrentIntervalSum() / USECS_PER_SECOND * 100.0);

        printf("%% time spent in packetVersionAndHashMatch() calls: | avg_30s: %.6f%%, last_second: %.6f%%\n",
            _timeSpentPerHashMatchCallStats.getWindowSum() / WINDOW_LENGTH_USECS * 100.0,
            _timeSpentPerHashMatchCallStats.getCurrentIntervalSum() / USECS_PER_SECOND * 100.0);

        DependencyManager::get<NodeList>()->eachNode([](const SharedNodePointer& node) {
            if (node->getLinkedData()) {
                AudioMixerClientData* nodeData = (AudioMixerClientData*)node->getLinkedData();

                if (node->getType() == NodeType::Agent && node->getActiveSocket()) {
                    printf("\nStats for agent %s --------------------------------\n",
                        node->getUUID().toString().toLatin1().data());
                    nodeData->printUpstreamDownstreamStats();
                }
            }
        });
    }

    _datagramsReadPerCallStats.currentIntervalComplete();
    _timeSpentPerCallStats.currentIntervalComplete();
    _timeSpentPerHashMatchCallStats.currentIntervalComplete();
}
Example #5
void AudioMixer::sendStatsPacket() {
    static QJsonObject statsObject;

    statsObject["useDynamicJitterBuffers"] = _streamSettings._dynamicJitterBuffers;
    statsObject["trailing_sleep_percentage"] = _trailingSleepRatio * 100.0f;
    statsObject["performance_throttling_ratio"] = _performanceThrottlingRatio;

    statsObject["avg_listeners_per_frame"] = (float) _sumListeners / (float) _numStatFrames;

    QJsonObject mixStats;
    mixStats["%_hrtf_mixes"] = percentageForMixStats(_hrtfRenders);
    mixStats["%_hrtf_silent_mixes"] = percentageForMixStats(_hrtfSilentRenders);
    mixStats["%_hrtf_struggle_mixes"] = percentageForMixStats(_hrtfStruggleRenders);
    mixStats["%_manual_stereo_mixes"] = percentageForMixStats(_manualStereoMixes);
    mixStats["%_manual_echo_mixes"] = percentageForMixStats(_manualEchoMixes);

    mixStats["total_mixes"] = _totalMixes;
    mixStats["avg_mixes_per_block"] = _totalMixes / _numStatFrames;

    statsObject["mix_stats"] = mixStats;

    _sumListeners = 0;
    _hrtfRenders = 0;
    _hrtfSilentRenders = 0;
    _hrtfStruggleRenders = 0;
    _manualStereoMixes = 0;
    _manualEchoMixes = 0;
    _totalMixes = 0;
    _numStatFrames = 0;

    // add stats for each listener
    auto nodeList = DependencyManager::get<NodeList>();
    QJsonObject listenerStats;

    nodeList->eachNode([&](const SharedNodePointer& node) {
        AudioMixerClientData* clientData = static_cast<AudioMixerClientData*>(node->getLinkedData());
        if (clientData) {
            QJsonObject nodeStats;
            QString uuidString = uuidStringWithoutCurlyBraces(node->getUUID());

            nodeStats["outbound_kbps"] = node->getOutboundBandwidth();
            nodeStats[USERNAME_UUID_REPLACEMENT_STATS_KEY] = uuidString;

            nodeStats["jitter"] = clientData->getAudioStreamStats();

            listenerStats[uuidString] = nodeStats;
        }
    });

    // add the listeners object to the root object
    statsObject["z_listeners"] = listenerStats;

    // send off the stats packets
    ThreadedAssignment::addPacketStatsAndSendStatsPacket(statsObject);
}
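Example #5 calls a percentageForMixStats() helper that this excerpt does not include. A minimal sketch, assuming it simply reports a counter as a percentage of _totalMixes with a guard against division by zero:
float AudioMixer::percentageForMixStats(int counter) {
    // assumed behavior: express the counter as a percentage of all mixes
    // performed during this stats interval; report 0 when nothing was mixed
    if (_totalMixes > 0) {
        return (float)counter / (float)_totalMixes * 100.0f;
    }
    return 0.0f;
}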
Example #6
void sendSilentPacket(const SharedNodePointer& node, AudioMixerClientData& data) {
    const int SILENT_PACKET_SIZE =
        sizeof(quint16) + AudioConstants::MAX_CODEC_NAME_LENGTH_ON_WIRE + sizeof(quint16);
    quint16 sequence = data.getOutgoingSequenceNumber();
    QString codec = data.getCodecName();
    auto mixPacket = createAudioPacket(PacketType::SilentAudioFrame, SILENT_PACKET_SIZE, sequence, codec);

    // pack number of samples
    mixPacket->writePrimitive(AudioConstants::NETWORK_FRAME_SAMPLES_STEREO);

    // send packet
    DependencyManager::get<NodeList>()->sendPacket(std::move(mixPacket), *node);
    data.incrementOutgoingMixedAudioSequenceNumber();
}
Example #7
void sendMixPacket(const SharedNodePointer& node, AudioMixerClientData& data, QByteArray& buffer) {
    const int MIX_PACKET_SIZE =
        sizeof(quint16) + AudioConstants::MAX_CODEC_NAME_LENGTH_ON_WIRE + AudioConstants::NETWORK_FRAME_BYTES_STEREO;
    quint16 sequence = data.getOutgoingSequenceNumber();
    QString codec = data.getCodecName();
    auto mixPacket = createAudioPacket(PacketType::MixedAudio, MIX_PACKET_SIZE, sequence, codec);

    // pack samples
    mixPacket->write(buffer.constData(), buffer.size());

    // send packet
    DependencyManager::get<NodeList>()->sendPacket(std::move(mixPacket), *node);
    data.incrementOutgoingMixedAudioSequenceNumber();
}
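Examples #6 and #7 both call a createAudioPacket() factory that is not shown in this listing. A plausible sketch, assuming it allocates the NLPacket and writes the header that MixedAudio and SilentAudioFrame packets share (a quint16 sequence number followed by the codec name):
std::unique_ptr<NLPacket> createAudioPacket(PacketType type, int size,
                                            quint16 sequence, const QString& codec) {
    // allocate the packet, then pack the common audio header fields
    auto audioPacket = NLPacket::create(type, size);
    audioPacket->writePrimitive(sequence);
    audioPacket->writeString(codec);
    return audioPacket;
}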
Example #8
void sendMutePacket(const SharedNodePointer& node, AudioMixerClientData& data) {
    auto mutePacket = NLPacket::create(PacketType::NoisyMute, 0);
    DependencyManager::get<NodeList>()->sendPacket(std::move(mutePacket), *node);

    // for now, just reset the flag; sending the mute packet once should be enough
    data.setShouldMuteClient(false);
}
Example #9
void sendEnvironmentPacket(const SharedNodePointer& node, AudioMixerClientData& data) {
    bool hasReverb = false;
    float reverbTime, wetLevel;

    auto& reverbSettings = AudioMixer::getReverbSettings();
    auto& audioZones = AudioMixer::getAudioZones();

    AvatarAudioStream* stream = data.getAvatarAudioStream();
    glm::vec3 streamPosition = stream->getPosition();

    // find reverb properties
    for (int i = 0; i < reverbSettings.size(); ++i) {
        AABox box = audioZones[reverbSettings[i].zone];
        if (box.contains(streamPosition)) {
            hasReverb = true;
            reverbTime = reverbSettings[i].reverbTime;
            wetLevel = reverbSettings[i].wetLevel;
            break;
        }
    }

    // check if data changed
    bool dataChanged = (stream->hasReverb() != hasReverb) ||
        (stream->hasReverb() && (stream->getRevebTime() != reverbTime || stream->getWetLevel() != wetLevel));
    if (dataChanged) {
        // update stream
        if (hasReverb) {
            stream->setReverb(reverbTime, wetLevel);
        } else {
            stream->clearReverb();
        }
    }

    // send packet at change or every so often
    float CHANCE_OF_SEND = 0.01f;
    bool sendData = dataChanged || (randFloat() < CHANCE_OF_SEND);

    if (sendData) {
        // size the packet
        unsigned char bitset = 0;
        int packetSize = sizeof(bitset);
        if (hasReverb) {
            packetSize += sizeof(reverbTime) + sizeof(wetLevel);
        }

        // write the packet
        auto envPacket = NLPacket::create(PacketType::AudioEnvironment, packetSize);
        if (hasReverb) {
            setAtBit(bitset, HAS_REVERB_BIT);
        }
        envPacket->writePrimitive(bitset);
        if (hasReverb) {
            envPacket->writePrimitive(reverbTime);
            envPacket->writePrimitive(wetLevel);
        }

        // send the packet
        DependencyManager::get<NodeList>()->sendPacket(std::move(envPacket), *node);
    }
}
Example #10
void AudioMixerSlave::mix(const SharedNodePointer& node) {
    // check that the node is valid
    AudioMixerClientData* data = (AudioMixerClientData*)node->getLinkedData();
    if (data == nullptr) {
        return;
    }

    if (node->isUpstream()) {
        return;
    }

    // check that the stream is valid
    auto avatarStream = data->getAvatarAudioStream();
    if (avatarStream == nullptr) {
        return;
    }

    // send mute packet, if necessary
    if (AudioMixer::shouldMute(avatarStream->getQuietestFrameLoudness()) || data->shouldMuteClient()) {
        sendMutePacket(node, *data);
    }

    // send audio packets, if necessary
    if (node->getType() == NodeType::Agent && node->getActiveSocket()) {
        ++stats.sumListeners;

        // mix the audio
        bool mixHasAudio = prepareMix(node);

        // send audio packet
        if (mixHasAudio || data->shouldFlushEncoder()) {
            QByteArray encodedBuffer;
            if (mixHasAudio) {
                // encode the audio
                QByteArray decodedBuffer(reinterpret_cast<char*>(_bufferSamples), AudioConstants::NETWORK_FRAME_BYTES_STEREO);
                data->encode(decodedBuffer, encodedBuffer);
            } else {
                // time to flush (resets shouldFlush until the next encode)
                data->encodeFrameOfZeros(encodedBuffer);
            }

            sendMixPacket(node, *data, encodedBuffer);
        } else {
            ++stats.sumListenersSilent;
            sendSilentPacket(node, *data);
        }

        // send environment packet
        sendEnvironmentPacket(node, *data);

        // send stats packet (about every second)
        const unsigned int NUM_FRAMES_PER_SEC = (int)ceil(AudioConstants::NETWORK_FRAMES_PER_SEC);
        if (data->shouldSendStats(_frame % NUM_FRAMES_PER_SEC)) {
            data->sendAudioStreamStatsPackets(node);
        }
    }
}
Example #11
float computeGain(const AudioMixerClientData& listenerNodeData, const AvatarAudioStream& listeningNodeStream,
        const PositionalAudioStream& streamToAdd, const glm::vec3& relativePosition, bool isEcho) {
    float gain = 1.0f;

    // injector: apply attenuation
    if (streamToAdd.getType() == PositionalAudioStream::Injector) {
        gain *= reinterpret_cast<const InjectedAudioStream*>(&streamToAdd)->getAttenuationRatio();

    // avatar: apply fixed off-axis attenuation to make them quieter as they turn away
    } else if (!isEcho && (streamToAdd.getType() == PositionalAudioStream::Microphone)) {
        glm::vec3 rotatedListenerPosition = glm::inverse(streamToAdd.getOrientation()) * relativePosition;

        // source directivity is based on angle of emission, in local coordinates
        glm::vec3 direction = glm::normalize(rotatedListenerPosition);
        float angleOfDelivery = fastAcosf(glm::clamp(-direction.z, -1.0f, 1.0f));   // UNIT_NEG_Z is "forward"

        const float MAX_OFF_AXIS_ATTENUATION = 0.2f;
        const float OFF_AXIS_ATTENUATION_STEP = (1 - MAX_OFF_AXIS_ATTENUATION) / 2.0f;
        float offAxisCoefficient = MAX_OFF_AXIS_ATTENUATION + (angleOfDelivery * (OFF_AXIS_ATTENUATION_STEP / PI_OVER_TWO));

        gain *= offAxisCoefficient;

        // apply master gain, only to avatars
        gain *= listenerNodeData.getMasterAvatarGain();
    }

    auto& audioZones = AudioMixer::getAudioZones();
    auto& zoneSettings = AudioMixer::getZoneSettings();

    // find distance attenuation coefficient
    float attenuationPerDoublingInDistance = AudioMixer::getAttenuationPerDoublingInDistance();
    for (int i = 0; i < zoneSettings.length(); ++i) {
        if (audioZones[zoneSettings[i].source].contains(streamToAdd.getPosition()) &&
            audioZones[zoneSettings[i].listener].contains(listeningNodeStream.getPosition())) {
            attenuationPerDoublingInDistance = zoneSettings[i].coefficient;
            break;
        }
    }

    // distance attenuation
    const float ATTENUATION_START_DISTANCE = 1.0f;
    float distance = glm::length(relativePosition);
    assert(ATTENUATION_START_DISTANCE > EPSILON);
    if (distance >= ATTENUATION_START_DISTANCE) {

        // translate the zone setting to gain per log2(distance)
        float g = 1.0f - attenuationPerDoublingInDistance;
        g = glm::clamp(g, EPSILON, 1.0f);

        // calculate the distance coefficient using the distance to this node
        float distanceCoefficient = fastExp2f(fastLog2f(g) * fastLog2f(distance/ATTENUATION_START_DISTANCE));

        // multiply the current attenuation coefficient by the distance coefficient
        gain *= distanceCoefficient;
    }

    return gain;
}
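The distance branch at the end of computeGain() relies on the identity exp2(log2(g) * log2(d)) == pow(g, log2(d)): the gain is multiplied by g once per doubling of distance beyond ATTENUATION_START_DISTANCE. A standalone check of that arithmetic, substituting std::exp2/std::log2 for the fastExp2f/fastLog2f approximations:
#include <cmath>
#include <cstdio>

int main() {
    const float g = 0.5f; // 1 - attenuationPerDoublingInDistance, here 0.5 per doubling
    const float distances[] = { 1.0f, 2.0f, 4.0f, 8.0f };
    for (float distance : distances) {
        // same formula as computeGain, with ATTENUATION_START_DISTANCE == 1.0f
        float coefficient = std::exp2(std::log2(g) * std::log2(distance));
        std::printf("distance %.0f m -> coefficient %.4f\n", distance, coefficient);
    }
    // prints 1.0000, 0.5000, 0.2500, 0.1250: the gain halves with each doubling
    return 0;
}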
Example #12
bool AudioMixer::prepareMixForListeningNode(Node* node) {
    AvatarAudioStream* nodeAudioStream = static_cast<AudioMixerClientData*>(node->getLinkedData())->getAvatarAudioStream();
    AudioMixerClientData* listenerNodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());

    // zero out the client mix for this node
    memset(_mixedSamples, 0, sizeof(_mixedSamples));

    // loop through all other nodes that have sufficient audio to mix

    DependencyManager::get<NodeList>()->eachNode([&](const SharedNodePointer& otherNode){
        // make sure that we have audio data for this other node and that it isn't being ignored by our listening node
        if (otherNode->getLinkedData() && !node->isIgnoringNodeWithID(otherNode->getUUID())) {
            AudioMixerClientData* otherNodeClientData = (AudioMixerClientData*) otherNode->getLinkedData();

            // enumerate the ARBs attached to the otherNode and add all that should be added to mix
            auto streamsCopy = otherNodeClientData->getAudioStreams();

            for (auto& streamPair : streamsCopy) {

                auto otherNodeStream = streamPair.second;

                if (*otherNode != *node || otherNodeStream->shouldLoopbackForNode()) {
                    addStreamToMixForListeningNodeWithStream(*listenerNodeData, *otherNodeStream, otherNode->getUUID(),
                                                             *nodeAudioStream);
                }
            }
        }
    });

    // use the per-listener AudioLimiter to render the mixed data...
    listenerNodeData->audioLimiter.render(_mixedSamples, _clampedSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

    // check for silent audio after the peak limiter has converted the samples
    bool hasAudio = false;
    for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; ++i) {
        if (_clampedSamples[i] != 0) {
            hasAudio = true;
            break;
        }
    }
    return hasAudio;
}
Example #13
int AudioMixer::prepareMixForListeningNode(Node* node) {
    AvatarAudioStream* nodeAudioStream = static_cast<AudioMixerClientData*>(node->getLinkedData())->getAvatarAudioStream();
    AudioMixerClientData* listenerNodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());

    // zero out the client mix for this node
    memset(_preMixSamples, 0, sizeof(_preMixSamples));
    memset(_mixSamples, 0, sizeof(_mixSamples));

    // loop through all other nodes that have sufficient audio to mix
    int streamsMixed = 0;

    DependencyManager::get<NodeList>()->eachNode([&](const SharedNodePointer& otherNode){
        if (otherNode->getLinkedData()) {
            AudioMixerClientData* otherNodeClientData = (AudioMixerClientData*) otherNode->getLinkedData();

            // enumerate the ARBs attached to the otherNode and add all that should be added to mix

            const QHash<QUuid, PositionalAudioStream*>& otherNodeAudioStreams = otherNodeClientData->getAudioStreams();
            QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
            for (i = otherNodeAudioStreams.constBegin(); i != otherNodeAudioStreams.constEnd(); i++) {
                PositionalAudioStream* otherNodeStream = i.value();
                QUuid streamUUID = i.key();

                if (otherNodeStream->getType() == PositionalAudioStream::Microphone) {
                    streamUUID = otherNode->getUUID();
                }

                if (*otherNode != *node || otherNodeStream->shouldLoopbackForNode()) {
                    streamsMixed += addStreamToMixForListeningNodeWithStream(listenerNodeData, streamUUID,
                                                                             otherNodeStream, nodeAudioStream);
                }
            }
        }
    });

    return streamsMixed;
}
Example #14
bool AudioMixerClientData::shouldIgnore(const SharedNodePointer self, const SharedNodePointer node, unsigned int frame) {
    // this is symmetric over self / node; if computed, it is cached in the other

    // check the cache to avoid computation
    auto& cache = _nodeSourcesIgnoreMap[node->getUUID()];
    if (cache.isCached()) {
        return cache.shouldIgnore();
    }

    AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
    if (!nodeData) {
        return false;
    }

    // compute shouldIgnore
    bool shouldIgnore = true;
    if ( // the nodes are not ignoring each other explicitly (or are but get data regardless)
            (!self->isIgnoringNodeWithID(node->getUUID()) ||
             (nodeData->getRequestsDomainListData() && node->getCanKick())) &&
            (!node->isIgnoringNodeWithID(self->getUUID()) ||
             (getRequestsDomainListData() && self->getCanKick())))  {

        // if either node is enabling an ignore radius, check their proximity
        if ((self->isIgnoreRadiusEnabled() || node->isIgnoreRadiusEnabled())) {
            auto& zone = _ignoreZone.get(frame);
            auto& nodeZone = nodeData->_ignoreZone.get(frame);
            shouldIgnore = zone.touches(nodeZone);
        } else {
            shouldIgnore = false;
        }
    }

    // cache in node
    nodeData->_nodeSourcesIgnoreMap[self->getUUID()].cache(shouldIgnore);

    return shouldIgnore;
}
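Example #14 reads and writes a per-pair cache entry (isCached(), shouldIgnore(), cache()) whose type is not part of this excerpt. A minimal sketch of what such an entry could look like, assuming it only memoizes the verdict so the symmetric check runs once per node pair per frame:
// hypothetical shape of the entries stored in _nodeSourcesIgnoreMap
class NodeSourcesIgnoreCacheEntry {
public:
    void cache(bool shouldIgnore) {
        _isCached = true;
        _shouldIgnore = shouldIgnore;
    }
    bool isCached() const { return _isCached; }
    bool shouldIgnore() const { return _shouldIgnore; }

private:
    bool _isCached { false };
    bool _shouldIgnore { false };
};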
Example #15
bool shouldBeSkipped(MixableStream& stream, const Node& listener,
                     const AvatarAudioStream& listenerAudioStream,
                     const AudioMixerClientData& listenerData) {

    if (stream.nodeStreamID.nodeLocalID == listener.getLocalID()) {
        return !stream.positionalStream->shouldLoopbackForNode();
    }

    // grab the unprocessed ignores and unignores from and for this listener
    const auto& nodesIgnoredByListener = listenerData.getNewIgnoredNodeIDs();
    const auto& nodesUnignoredByListener = listenerData.getNewUnignoredNodeIDs();
    const auto& nodesIgnoringListener = listenerData.getNewIgnoringNodeIDs();
    const auto& nodesUnignoringListener = listenerData.getNewUnignoringNodeIDs();

    // this stream was previously not ignored by the listener and we have some newly ignored streams
    // check now if it is one of the ignored streams and flag it as such
    if (stream.ignoredByListener) {
        stream.ignoredByListener = !contains(nodesUnignoredByListener, stream.nodeStreamID.nodeID);
    } else {
        stream.ignoredByListener = contains(nodesIgnoredByListener, stream.nodeStreamID.nodeID);
    }

    if (stream.ignoringListener) {
        stream.ignoringListener = !contains(nodesUnignoringListener, stream.nodeStreamID.nodeID);
    } else {
        stream.ignoringListener = contains(nodesIgnoringListener, stream.nodeStreamID.nodeID);
    }

    bool listenerIsAdmin = listenerData.getRequestsDomainListData() && listener.getCanKick();
    if (stream.ignoredByListener || (stream.ignoringListener && !listenerIsAdmin)) {
        return true;
    }

    if (!listenerData.getSoloedNodes().empty()) {
        return !contains(listenerData.getSoloedNodes(), stream.nodeStreamID.nodeID);
    }

    bool shouldCheckIgnoreBox = (listenerAudioStream.isIgnoreBoxEnabled() ||
                                 stream.positionalStream->isIgnoreBoxEnabled());
    if (shouldCheckIgnoreBox &&
        listenerAudioStream.getIgnoreBox().touches(stream.positionalStream->getIgnoreBox())) {
        return true;
    }

    return false;
}
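Examples #15 and #16 test node IDs with a contains() helper that is not shown. A minimal sketch, assuming it is a thin wrapper over std::find for any iterable collection:
#include <algorithm>
#include <iterator>

template <typename Container, typename Value>
bool contains(const Container& container, const Value& value) {
    // linear search; the ignore/solo ID collections are expected to be small
    return std::find(std::begin(container), std::end(container), value)
        != std::end(container);
}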
Example #16
void AudioMixerSlave::addStreams(Node& listener, AudioMixerClientData& listenerData) {
    auto& ignoredNodeIDs = listener.getIgnoredNodeIDs();
    auto& ignoringNodeIDs = listenerData.getIgnoringNodeIDs();

    auto& streams = listenerData.getStreams();

    // add data for newly created streams to our vector
    if (!listenerData.getHasReceivedFirstMix()) {
        // when this listener is new, we need to fill its added streams object with all available streams
        std::for_each(_begin, _end, [&](const SharedNodePointer& node) {
            AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
            if (nodeData) {
                for (auto& stream : nodeData->getAudioStreams()) {
                    bool ignoredByListener = contains(ignoredNodeIDs, node->getUUID());
                    bool ignoringListener = contains(ignoringNodeIDs, node->getUUID());

                    if (ignoredByListener || ignoringListener) {
                        streams.skipped.emplace_back(node->getUUID(), node->getLocalID(),
                                                    stream->getStreamIdentifier(), stream.get());

                        // pre-populate ignored and ignoring flags for this stream
                        streams.skipped.back().ignoredByListener = ignoredByListener;
                        streams.skipped.back().ignoringListener = ignoringListener;
                    } else {
                        streams.active.emplace_back(node->getUUID(), node->getLocalID(),
                                                     stream->getStreamIdentifier(), stream.get());
                    }
                }
            }
        });

        // flag this listener as having received their first mix so we know we don't need to enumerate all nodes again
        listenerData.setHasReceivedFirstMix(true);
    } else {
        for (const auto& newStream : _sharedData.addedStreams) {
            bool ignoredByListener = contains(ignoredNodeIDs, newStream.nodeIDStreamID.nodeID);
            bool ignoringListener = contains(ignoringNodeIDs, newStream.nodeIDStreamID.nodeID);

            if (ignoredByListener || ignoringListener) {
                streams.skipped.emplace_back(newStream.nodeIDStreamID, newStream.positionalStream);

                // pre-populate ignored and ignoring flags for this stream
                streams.skipped.back().ignoredByListener = ignoredByListener;
                streams.skipped.back().ignoringListener = ignoringListener;
            } else {
                streams.active.emplace_back(newStream.nodeIDStreamID, newStream.positionalStream);
            }
        }
    }
}
Example #17
void AudioMixer::addStreamToMixForListeningNodeWithStream(AudioMixerClientData& listenerNodeData,
                                                          const PositionalAudioStream& streamToAdd,
                                                          const QUuid& sourceNodeID,
                                                          const AvatarAudioStream& listeningNodeStream) {


    // to reduce artifacts we calculate the gain and azimuth for every source for this listener
    // even if we are not going to end up mixing in this source

    ++_totalMixes;

    // this ensures that the tail of any previously mixed audio or the first block of new audio sounds correct

    // check if this is a server echo of a source back to itself
    bool isEcho = (&streamToAdd == &listeningNodeStream);

    glm::vec3 relativePosition = streamToAdd.getPosition() - listeningNodeStream.getPosition();

    // figure out the distance between source and listener
    float distance = glm::max(glm::length(relativePosition), EPSILON);

    // figure out the gain for this source at the listener
    float gain = gainForSource(streamToAdd, listeningNodeStream, relativePosition, isEcho);

    // figure out the azimuth to this source at the listener
    float azimuth = isEcho ? 0.0f : azimuthForSource(streamToAdd, listeningNodeStream, relativePosition);

    float repeatedFrameFadeFactor = 1.0f;

    static const int HRTF_DATASET_INDEX = 1;

    if (!streamToAdd.lastPopSucceeded()) {
        bool forceSilentBlock = true;

        if (_streamSettings._repetitionWithFade && !streamToAdd.getLastPopOutput().isNull()) {

            // repetition with fade is enabled, and we do have a valid previous frame to repeat
            // so we mix the previously-mixed block

            // this is preferable to not mixing it at all to avoid the harsh jump to silence

            // we'll repeat the last block until the stream has a new block to mix,
            // and we'll gradually fade that repeated block into silence.

            // calculate its fade factor, which depends on how many times it's already been repeated.

            repeatedFrameFadeFactor = calculateRepeatedFrameFadeFactor(streamToAdd.getConsecutiveNotMixedCount() - 1);
            if (repeatedFrameFadeFactor > 0.0f) {
                // apply the repeatedFrameFadeFactor to the gain
                gain *= repeatedFrameFadeFactor;

                forceSilentBlock = false;
            }
        }

        if (forceSilentBlock) {
            // we're not repeating, either because we've already done so enough times or because repetition with fade is disabled
            // in this case we will call renderSilent with a forced silent block
            // this ensures the correct tail from the previously mixed block and the correct spatialization of first block
            // of any upcoming audio

            if (!streamToAdd.isStereo() && !isEcho) {
                // get the existing listener-source HRTF object, or create a new one
                auto& hrtf = listenerNodeData.hrtfForStream(sourceNodeID, streamToAdd.getStreamIdentifier());

                // this is not done for stereo streams since they do not go through the HRTF
                static int16_t silentMonoBlock[AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL] = {};
                hrtf.renderSilent(silentMonoBlock, _mixedSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                                  AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

                ++_hrtfSilentRenders;
            }

            return;
        }
    }

    // grab the stream from the ring buffer
    AudioRingBuffer::ConstIterator streamPopOutput = streamToAdd.getLastPopOutput();

    if (streamToAdd.isStereo() || isEcho) {
        // this is a stereo source or server echo so we do not pass it through the HRTF
        // simply apply our calculated gain to each sample
        if (streamToAdd.isStereo()) {
            for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; ++i) {
                _mixedSamples[i] += float(streamPopOutput[i] * gain / AudioConstants::MAX_SAMPLE_VALUE);
            }

            ++_manualStereoMixes;
        } else {
            for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; i += 2) {
                auto monoSample = float(streamPopOutput[i / 2] * gain / AudioConstants::MAX_SAMPLE_VALUE);
                _mixedSamples[i] += monoSample;
                _mixedSamples[i + 1] += monoSample;
            }

            ++_manualEchoMixes;
        }

        return;
    }

    // get the existing listener-source HRTF object, or create a new one
    auto& hrtf = listenerNodeData.hrtfForStream(sourceNodeID, streamToAdd.getStreamIdentifier());

    static int16_t streamBlock[AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL];

    streamPopOutput.readSamples(streamBlock, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

    // if the frame we're about to mix is silent, simply call render silent and move on
    if (streamToAdd.getLastPopOutputLoudness() == 0.0f) {
        // silent frame from source

        // we still need to call renderSilent via the HRTF for mono source
        hrtf.renderSilent(streamBlock, _mixedSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                          AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

        ++_hrtfSilentRenders;

        return;
    }

    if (_performanceThrottlingRatio > 0.0f
        && streamToAdd.getLastPopOutputTrailingLoudness() / glm::length(relativePosition) <= _minAudibilityThreshold) {
        // the mixer is struggling so we're going to drop off some streams

        // we call renderSilent via the HRTF with the actual frame data and a gain of 0.0
        hrtf.renderSilent(streamBlock, _mixedSamples, HRTF_DATASET_INDEX, azimuth, distance, 0.0f,
                          AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

        ++_hrtfStruggleRenders;

        return;
    }

    ++_hrtfRenders;

    // mono stream, call the HRTF with our block and calculated azimuth and gain
    hrtf.render(streamBlock, _mixedSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
}
Example #18
void AudioMixer::sendAudioEnvironmentPacket(SharedNodePointer node) {
    // Send stream properties
    bool hasReverb = false;
    float reverbTime, wetLevel;
    // find reverb properties
    for (int i = 0; i < _zoneReverbSettings.size(); ++i) {
        AudioMixerClientData* data = static_cast<AudioMixerClientData*>(node->getLinkedData());
        glm::vec3 streamPosition = data->getAvatarAudioStream()->getPosition();
        AABox box = _audioZones[_zoneReverbSettings[i].zone];
        if (box.contains(streamPosition)) {
            hasReverb = true;
            reverbTime = _zoneReverbSettings[i].reverbTime;
            wetLevel = _zoneReverbSettings[i].wetLevel;

            // Modulate wet level with distance to wall
            float MIN_ATTENUATION_DISTANCE = 2.0f;
            float MAX_ATTENUATION = -12; // dB
            glm::vec3 distanceToWalls = (box.getDimensions() / 2.0f) - glm::abs(streamPosition - box.calcCenter());
            float distanceToClosestWall = glm::min(distanceToWalls.x, distanceToWalls.z);
            if (distanceToClosestWall < MIN_ATTENUATION_DISTANCE) {
                wetLevel += MAX_ATTENUATION * (1.0f - distanceToClosestWall / MIN_ATTENUATION_DISTANCE);
            }
            break;
        }
    }
    
    AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
    AvatarAudioStream* stream = nodeData->getAvatarAudioStream();
    bool dataChanged = (stream->hasReverb() != hasReverb) ||
    (stream->hasReverb() && (stream->getRevebTime() != reverbTime ||
                             stream->getWetLevel() != wetLevel));
    if (dataChanged) {
        // Update stream
        if (hasReverb) {
            stream->setReverb(reverbTime, wetLevel);
        } else {
            stream->clearReverb();
        }
    }

    // Send at change or every so often
    float CHANCE_OF_SEND = 0.01f;
    bool sendData = dataChanged || (randFloat() < CHANCE_OF_SEND);

    if (sendData) {
        auto nodeList = DependencyManager::get<NodeList>();

        unsigned char bitset = 0;

        int packetSize = sizeof(bitset);

        if (hasReverb) {
            packetSize += sizeof(reverbTime) + sizeof(wetLevel);
        }

        auto envPacket = NLPacket::create(PacketType::AudioEnvironment, packetSize);

        if (hasReverb) {
            setAtBit(bitset, HAS_REVERB_BIT);
        }

        envPacket->writePrimitive(bitset);

        if (hasReverb) {
            envPacket->writePrimitive(reverbTime);
            envPacket->writePrimitive(wetLevel);
        }
        nodeList->sendPacket(std::move(envPacket), *node);
    }
}
Example #19
void AudioMixer::sendStatsPacket() {
    static QJsonObject statsObject;

    statsObject["useDynamicJitterBuffers"] = _streamSettings._dynamicJitterBuffers;
    statsObject["trailing_sleep_percentage"] = _trailingSleepRatio * 100.0f;
    statsObject["performance_throttling_ratio"] = _performanceThrottlingRatio;

    statsObject["average_listeners_per_frame"] = (float) _sumListeners / (float) _numStatFrames;

    if (_sumListeners > 0) {
        statsObject["average_mixes_per_listener"] = (float) _sumMixes / (float) _sumListeners;
    } else {
        statsObject["average_mixes_per_listener"] = 0.0;
    }

    _sumListeners = 0;
    _sumMixes = 0;
    _numStatFrames = 0;

    QJsonObject readPendingDatagramStats;

    QJsonObject rpdCallsStats;
    rpdCallsStats["calls_per_sec_avg_30s"] = _readPendingCallsPerSecondStats.getWindowAverage();
    rpdCallsStats["calls_last_sec"] = _readPendingCallsPerSecondStats.getLastCompleteIntervalStats().getSum() + 0.5;

    readPendingDatagramStats["calls"] = rpdCallsStats;

    QJsonObject packetsPerCallStats;
    packetsPerCallStats["avg_30s"] = _datagramsReadPerCallStats.getWindowAverage();
    packetsPerCallStats["avg_1s"] = _datagramsReadPerCallStats.getLastCompleteIntervalStats().getAverage();

    readPendingDatagramStats["packets_per_call"] = packetsPerCallStats;

    QJsonObject packetsTimePerCallStats;
    packetsTimePerCallStats["usecs_per_call_avg_30s"] = _timeSpentPerCallStats.getWindowAverage();
    packetsTimePerCallStats["usecs_per_call_avg_1s"] = _timeSpentPerCallStats.getLastCompleteIntervalStats().getAverage();
    packetsTimePerCallStats["prct_time_in_call_30s"] =
        _timeSpentPerCallStats.getWindowSum() / (READ_DATAGRAMS_STATS_WINDOW_SECONDS * USECS_PER_SECOND) * 100.0;
    packetsTimePerCallStats["prct_time_in_call_1s"] =
        _timeSpentPerCallStats.getLastCompleteIntervalStats().getSum() / USECS_PER_SECOND * 100.0;

    readPendingDatagramStats["packets_time_per_call"] = packetsTimePerCallStats;

    QJsonObject hashMatchTimePerCallStats;
    hashMatchTimePerCallStats["usecs_per_hashmatch_avg_30s"] = _timeSpentPerHashMatchCallStats.getWindowAverage();
    hashMatchTimePerCallStats["usecs_per_hashmatch_avg_1s"]
        = _timeSpentPerHashMatchCallStats.getLastCompleteIntervalStats().getAverage();
    hashMatchTimePerCallStats["prct_time_in_hashmatch_30s"]
        = _timeSpentPerHashMatchCallStats.getWindowSum() / (READ_DATAGRAMS_STATS_WINDOW_SECONDS*USECS_PER_SECOND) * 100.0;
    hashMatchTimePerCallStats["prct_time_in_hashmatch_1s"]
        = _timeSpentPerHashMatchCallStats.getLastCompleteIntervalStats().getSum() / USECS_PER_SECOND * 100.0;
    readPendingDatagramStats["hashmatch_time_per_call"] = hashMatchTimePerCallStats;

    statsObject["read_pending_datagrams"] = readPendingDatagramStats;

    // add stats for each listener
    auto nodeList = DependencyManager::get<NodeList>();
    QJsonObject listenerStats;

    nodeList->eachNode([&](const SharedNodePointer& node) {
        AudioMixerClientData* clientData = static_cast<AudioMixerClientData*>(node->getLinkedData());
        if (clientData) {
            QJsonObject nodeStats;
            QString uuidString = uuidStringWithoutCurlyBraces(node->getUUID());

            nodeStats["outbound_kbps"] = node->getOutboundBandwidth();
            nodeStats[USERNAME_UUID_REPLACEMENT_STATS_KEY] = uuidString;

            nodeStats["jitter"] = clientData->getAudioStreamStats();

            listenerStats[uuidString] = nodeStats;
        }
    });

    // add the listeners object to the root object
    statsObject["listeners"] = listenerStats;

    // send off the stats packets
    ThreadedAssignment::addPacketStatsAndSendStatsPacket(statsObject);
}
Example #20
void AudioMixerSlave::processPackets(const SharedNodePointer& node) {
    AudioMixerClientData* data = (AudioMixerClientData*)node->getLinkedData();
    if (data) {
        data->processPackets();
    }
}
Example #21
void AudioMixer::run() {

    ThreadedAssignment::commonInit(AUDIO_MIXER_LOGGING_TARGET_NAME, NodeType::AudioMixer);

    auto nodeList = DependencyManager::get<NodeList>();

    nodeList->addNodeTypeToInterestSet(NodeType::Agent);

    nodeList->linkedDataCreateCallback = [](Node* node) {
        node->setLinkedData(new AudioMixerClientData());
    };

    // wait until we have the domain-server settings, otherwise we bail
    DomainHandler& domainHandler = nodeList->getDomainHandler();

    qDebug() << "Waiting for domain settings from domain-server.";

    // block until we get the settingsRequestComplete signal
    QEventLoop loop;
    connect(&domainHandler, &DomainHandler::settingsReceived, &loop, &QEventLoop::quit);
    connect(&domainHandler, &DomainHandler::settingsReceiveFail, &loop, &QEventLoop::quit);
    domainHandler.requestDomainSettings();
    loop.exec();
    
    if (domainHandler.getSettingsObject().isEmpty()) {
        qDebug() << "Failed to retreive settings object from domain-server. Bailing on assignment.";
        setFinished(true);
        return;
    }

    const QJsonObject& settingsObject = domainHandler.getSettingsObject();

    // check the settings object to see if we have anything we can parse out
    parseSettingsObject(settingsObject);

    int nextFrame = 0;
    QElapsedTimer timer;
    timer.start();

    int usecToSleep = AudioConstants::NETWORK_FRAME_USECS;

    const int TRAILING_AVERAGE_FRAMES = 100;
    int framesSinceCutoffEvent = TRAILING_AVERAGE_FRAMES;

    while (!_isFinished) {
        const float STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.10f;
        const float BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.20f;

        const float RATIO_BACK_OFF = 0.02f;

        const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES;
        const float PREVIOUS_FRAMES_RATIO = 1.0f - CURRENT_FRAME_RATIO;

        if (usecToSleep < 0) {
            usecToSleep = 0;
        }

        _trailingSleepRatio = (PREVIOUS_FRAMES_RATIO * _trailingSleepRatio)
            + (usecToSleep * CURRENT_FRAME_RATIO / (float) AudioConstants::NETWORK_FRAME_USECS);

        float lastCutoffRatio = _performanceThrottlingRatio;
        bool hasRatioChanged = false;

        if (framesSinceCutoffEvent >= TRAILING_AVERAGE_FRAMES) {
            if (_trailingSleepRatio <= STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD) {
                // we're struggling - change our min required loudness to reduce some load
                _performanceThrottlingRatio = _performanceThrottlingRatio + (0.5f * (1.0f - _performanceThrottlingRatio));

                qDebug() << "Mixer is struggling, sleeping" << _trailingSleepRatio * 100 << "% of frame time. Old cutoff was"
                    << lastCutoffRatio << "and is now" << _performanceThrottlingRatio;
                hasRatioChanged = true;
            } else if (_trailingSleepRatio >= BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD && _performanceThrottlingRatio != 0) {
                // we've recovered and can back off the required loudness
                _performanceThrottlingRatio = _performanceThrottlingRatio - RATIO_BACK_OFF;

                if (_performanceThrottlingRatio < 0) {
                    _performanceThrottlingRatio = 0;
                }

                qDebug() << "Mixer is recovering, sleeping" << _trailingSleepRatio * 100 << "% of frame time. Old cutoff was"
                    << lastCutoffRatio << "and is now" << _performanceThrottlingRatio;
                hasRatioChanged = true;
            }

            if (hasRatioChanged) {
                // set our min audibility threshold from the new ratio
                _minAudibilityThreshold = LOUDNESS_TO_DISTANCE_RATIO / (2.0f * (1.0f - _performanceThrottlingRatio));
                qDebug() << "Minimum audability required to be mixed is now" << _minAudibilityThreshold;

                framesSinceCutoffEvent = 0;
            }
        }

        if (!hasRatioChanged) {
            ++framesSinceCutoffEvent;
        }

        quint64 now = usecTimestampNow();
        if (now - _lastPerSecondCallbackTime > USECS_PER_SECOND) {
            perSecondActions();
            _lastPerSecondCallbackTime = now;
        }

        nodeList->eachNode([&](const SharedNodePointer& node) {

            if (node->getLinkedData()) {
                AudioMixerClientData* nodeData = (AudioMixerClientData*)node->getLinkedData();

                // this function will attempt to pop a frame from each audio stream.
                // a pointer to the popped data is stored as a member in InboundAudioStream.
                // That's how the popped audio data will be read for mixing (but only if the pop was successful)
                nodeData->checkBuffersBeforeFrameSend();

                // if the stream should be muted, send mute packet
                if (nodeData->getAvatarAudioStream()
                    && shouldMute(nodeData->getAvatarAudioStream()->getQuietestFrameLoudness())) {
                    auto mutePacket = NLPacket::create(PacketType::NoisyMute, 0);
                    nodeList->sendPacket(std::move(mutePacket), *node);
                }

                if (node->getType() == NodeType::Agent && node->getActiveSocket()
                    && nodeData->getAvatarAudioStream()) {

                    int streamsMixed = prepareMixForListeningNode(node.data());

                    std::unique_ptr<NLPacket> mixPacket;

                    if (streamsMixed > 0) {
                        int mixPacketBytes = sizeof(quint16) + AudioConstants::NETWORK_FRAME_BYTES_STEREO;
                        mixPacket = NLPacket::create(PacketType::MixedAudio, mixPacketBytes);

                        // pack sequence number
                        quint16 sequence = nodeData->getOutgoingSequenceNumber();
                        mixPacket->writePrimitive(sequence);

                        // pack mixed audio samples
                        mixPacket->write(reinterpret_cast<char*>(_mixSamples),
                                         AudioConstants::NETWORK_FRAME_BYTES_STEREO);
                    } else {
                        int silentPacketBytes = sizeof(quint16) + sizeof(quint16);
                        mixPacket = NLPacket::create(PacketType::SilentAudioFrame, silentPacketBytes);

                        // pack sequence number
                        quint16 sequence = nodeData->getOutgoingSequenceNumber();
                        mixPacket->writePrimitive(sequence);

                        // pack number of silent audio samples
                        quint16 numSilentSamples = AudioConstants::NETWORK_FRAME_SAMPLES_STEREO;
                        mixPacket->writePrimitive(numSilentSamples);
                    }

                    // Send audio environment
                    sendAudioEnvironmentPacket(node);

                    // send mixed audio packet
                    nodeList->sendPacket(std::move(mixPacket), *node);
                    nodeData->incrementOutgoingMixedAudioSequenceNumber();

                    // send an audio stream stats packet if it's time
                    if (_sendAudioStreamStats) {
                        nodeData->sendAudioStreamStatsPackets(node);
                        _sendAudioStreamStats = false;
                    }

                    ++_sumListeners;
                }
            }
        });

        ++_numStatFrames;

        // since we're in a while loop, we need to help Qt's event processing
        QCoreApplication::processEvents();

        if (_isFinished) {
            // at this point the audio-mixer is done
            // check if we have a deferred delete event to process (which we should once finished)
            QCoreApplication::sendPostedEvents(this, QEvent::DeferredDelete);
            break;
        }

        usecToSleep = (++nextFrame * AudioConstants::NETWORK_FRAME_USECS) - timer.nsecsElapsed() / 1000; // ns to us

        if (usecToSleep > 0) {
            usleep(usecToSleep);
        }
    }
}
Example #22
bool AudioMixerSlave::prepareMix(const SharedNodePointer& listener) {
    AvatarAudioStream* listenerAudioStream = static_cast<AudioMixerClientData*>(listener->getLinkedData())->getAvatarAudioStream();
    AudioMixerClientData* listenerData = static_cast<AudioMixerClientData*>(listener->getLinkedData());

    // zero out the mix for this listener
    memset(_mixSamples, 0, sizeof(_mixSamples));

    bool isThrottling = _numToRetain != -1;
    bool isSoloing = !listenerData->getSoloedNodes().empty();

    auto& streams = listenerData->getStreams();

    addStreams(*listener, *listenerData);

    // Process skipped streams
    erase_if(streams.skipped, [&](MixableStream& stream) {
        if (shouldBeRemoved(stream, _sharedData)) {
            return true;
        }

        if (!shouldBeSkipped(stream, *listener, *listenerAudioStream, *listenerData)) {
            if (shouldBeInactive(stream)) {
                streams.inactive.push_back(move(stream));
                ++stats.skippedToInactive;
            } else {
                streams.active.push_back(move(stream));
                ++stats.skippedToActive;
            }
            return true;
        }

        if (!isThrottling) {
            updateHRTFParameters(stream, *listenerAudioStream,
                                 listenerData->getMasterAvatarGain());
        }
        return false;
    });

    // Process inactive streams
    erase_if(streams.inactive, [&](MixableStream& stream) {
        if (shouldBeRemoved(stream, _sharedData)) {
            return true;
        }

        if (shouldBeSkipped(stream, *listener, *listenerAudioStream, *listenerData)) {
            streams.skipped.push_back(move(stream));
            ++stats.inactiveToSkipped;
            return true;
        }

        if (!shouldBeInactive(stream)) {
            streams.active.push_back(move(stream));
            ++stats.inactiveToActive;
            return true;
        }

        if (!isThrottling) {
            updateHRTFParameters(stream, *listenerAudioStream,
                                 listenerData->getMasterAvatarGain());
        }
        return false;
    });

    // Process active streams
    erase_if(streams.active, [&](MixableStream& stream) {
        if (shouldBeRemoved(stream, _sharedData)) {
            return true;
        }

        if (isThrottling) {
            // we're throttling, so we need to update the approximate volume for any un-skipped streams
            // unless this is simply for an echo (in which case the approx volume is 1.0)
            stream.approximateVolume = approximateVolume(stream, listenerAudioStream);
        } else {
            if (shouldBeSkipped(stream, *listener, *listenerAudioStream, *listenerData)) {
                addStream(stream, *listenerAudioStream, 0.0f, isSoloing);
                streams.skipped.push_back(move(stream));
                ++stats.activeToSkipped;
                return true;
            }

            addStream(stream, *listenerAudioStream, listenerData->getMasterAvatarGain(),
                      isSoloing);

            if (shouldBeInactive(stream)) {
                // To reduce artifacts we still call render to flush the HRTF for every silent
                // source on the first frame where the source becomes silent;
                // this ensures the correct tail from the last mixed block
                streams.inactive.push_back(move(stream));
                ++stats.activeToInactive;
                return true;
            }
        }

        return false;
    });

    if (isThrottling) {
        // since we're throttling, we need to partition the mixable into throttled and unthrottled streams
        int numToRetain = min(_numToRetain, (int)streams.active.size()); // Make sure we don't overflow
        auto throttlePoint = begin(streams.active) + numToRetain;

        std::nth_element(streams.active.begin(), throttlePoint, streams.active.end(),
                         [](const auto& a, const auto& b)
                         {
                             return a.approximateVolume > b.approximateVolume;
                         });

        SegmentedEraseIf<MixableStreamsVector> erase(streams.active);
        erase.iterateTo(throttlePoint, [&](MixableStream& stream) {
            if (shouldBeSkipped(stream, *listener, *listenerAudioStream, *listenerData)) {
                resetHRTFState(stream);
                streams.skipped.push_back(move(stream));
                ++stats.activeToSkipped;
                return true;
            }

            addStream(stream, *listenerAudioStream, listenerData->getMasterAvatarGain(),
                      isSoloing);

            if (shouldBeInactive(stream)) {
                // To reduce artifacts we still call render to flush the HRTF for every silent
                // source on the first frame where the source becomes silent;
                // this ensures the correct tail from the last mixed block
                streams.inactive.push_back(move(stream));
                ++stats.activeToInactive;
                return true;
            }

            return false;
        });
        erase.iterateTo(end(streams.active), [&](MixableStream& stream) {
            // To reduce artifacts we reset the HRTF state for every throttled
            // source on the first frame where the source becomes throttled;
            // this at least removes the tail from the last mixed block,
            // preventing excessive artifacts on the next first block
            resetHRTFState(stream);

            if (shouldBeSkipped(stream, *listener, *listenerAudioStream, *listenerData)) {
                streams.skipped.push_back(move(stream));
                ++stats.activeToSkipped;
                return true;
            }

            if (shouldBeInactive(stream)) {
                streams.inactive.push_back(move(stream));
                ++stats.activeToInactive;
                return true;
            }

            return false;
        });
    }

    stats.skipped += (int)streams.skipped.size();
    stats.inactive += (int)streams.inactive.size();
    stats.active += (int)streams.active.size();

    // clear the newly ignored, un-ignored, ignoring, and un-ignoring streams now that we've processed them
    listenerData->clearStagedIgnoreChanges();

#ifdef HIFI_AUDIO_MIXER_DEBUG
    auto mixEnd = p_high_resolution_clock::now();
    auto mixTime = std::chrono::duration_cast<std::chrono::nanoseconds>(mixEnd - mixStart);
    stats.mixTime += mixTime.count();
#endif

    // check for silent audio before limiting
    // limiting uses a dither and can only guarantee abs(sample) <= 1
    bool hasAudio = false;
    for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; ++i) {
        if (_mixSamples[i] != 0.0f) {
            hasAudio = true;
            break;
        }
    }

    // use the per listener AudioLimiter to render the mixed data
    listenerData->audioLimiter.render(_mixSamples, _bufferSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

    return hasAudio;
}
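Example #22 buckets streams with an erase_if() helper that is not shown here (std::erase_if only arrived in C++20). A minimal sketch with the assumed semantics: drop every element for which the predicate returns true, preserving the relative order of the rest; the predicate may move an element into another list before asking for its removal, as the lambdas above do.
#include <utility>
#include <vector>

template <typename T, typename Predicate>
void erase_if(std::vector<T>& v, Predicate predicate) {
    auto dst = v.begin();
    for (auto src = v.begin(); src != v.end(); ++src) {
        if (!predicate(*src)) {
            // keep this element, compacting it toward the front
            if (dst != src) {
                *dst = std::move(*src);
            }
            ++dst;
        }
    }
    v.erase(dst, v.end());
}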
Example #23
void AudioMixer::broadcastMixes() {
    auto nodeList = DependencyManager::get<NodeList>();

    auto nextFrameTimestamp = p_high_resolution_clock::now();
    auto timeToSleep = std::chrono::microseconds(0);

    const int TRAILING_AVERAGE_FRAMES = 100;
    int framesSinceCutoffEvent = TRAILING_AVERAGE_FRAMES;

    int currentFrame { 1 };
    int numFramesPerSecond { (int) ceil(AudioConstants::NETWORK_FRAMES_PER_SEC) };

    while (!_isFinished) {
        const float STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.10f;
        const float BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.20f;

        const float RATIO_BACK_OFF = 0.02f;

        const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES;
        const float PREVIOUS_FRAMES_RATIO = 1.0f - CURRENT_FRAME_RATIO;

        if (timeToSleep.count() < 0) {
            timeToSleep = std::chrono::microseconds(0);
        }

        _trailingSleepRatio = (PREVIOUS_FRAMES_RATIO * _trailingSleepRatio)
            + (timeToSleep.count() * CURRENT_FRAME_RATIO / (float) AudioConstants::NETWORK_FRAME_USECS);

        float lastCutoffRatio = _performanceThrottlingRatio;
        bool hasRatioChanged = false;

        if (framesSinceCutoffEvent >= TRAILING_AVERAGE_FRAMES) {
            if (_trailingSleepRatio <= STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD) {
                // we're struggling - change our min required loudness to reduce some load
                _performanceThrottlingRatio = _performanceThrottlingRatio + (0.5f * (1.0f - _performanceThrottlingRatio));

                qDebug() << "Mixer is struggling, sleeping" << _trailingSleepRatio * 100 << "% of frame time. Old cutoff was"
                    << lastCutoffRatio << "and is now" << _performanceThrottlingRatio;
                hasRatioChanged = true;
            } else if (_trailingSleepRatio >= BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD && _performanceThrottlingRatio != 0) {
                // we've recovered and can back off the required loudness
                _performanceThrottlingRatio = _performanceThrottlingRatio - RATIO_BACK_OFF;

                if (_performanceThrottlingRatio < 0) {
                    _performanceThrottlingRatio = 0;
                }

                qDebug() << "Mixer is recovering, sleeping" << _trailingSleepRatio * 100 << "% of frame time. Old cutoff was"
                    << lastCutoffRatio << "and is now" << _performanceThrottlingRatio;
                hasRatioChanged = true;
            }

            if (hasRatioChanged) {
                // set our minimum audibility threshold from the new ratio
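                // (the threshold diverges as the ratio approaches 1.0, so progressively fewer streams are mixed)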
                _minAudibilityThreshold = LOUDNESS_TO_DISTANCE_RATIO / (2.0f * (1.0f - _performanceThrottlingRatio));
                qDebug() << "Minimum audability required to be mixed is now" << _minAudibilityThreshold;

                framesSinceCutoffEvent = 0;
            }
        }

        if (!hasRatioChanged) {
            ++framesSinceCutoffEvent;
        }

        nodeList->eachNode([&](const SharedNodePointer& node) {

            if (node->getLinkedData()) {
                AudioMixerClientData* nodeData = (AudioMixerClientData*)node->getLinkedData();

                // attempt to pop a frame from each of this node's audio streams;
                // a pointer to the popped data is stored as a member of InboundAudioStream,
                // and that is where the samples are read for mixing (only if the pop succeeded)
                nodeData->checkBuffersBeforeFrameSend();

                // if the stream should be muted, send mute packet
                if (nodeData->getAvatarAudioStream()
                    && shouldMute(nodeData->getAvatarAudioStream()->getQuietestFrameLoudness())) {
                    auto mutePacket = NLPacket::create(PacketType::NoisyMute, 0);
                    nodeList->sendPacket(std::move(mutePacket), *node);
                }

                if (node->getType() == NodeType::Agent && node->getActiveSocket()
                    && nodeData->getAvatarAudioStream()) {

                    bool mixHasAudio = prepareMixForListeningNode(node.data());

                    std::unique_ptr<NLPacket> mixPacket;

                    if (mixHasAudio) {
                        int mixPacketBytes = sizeof(quint16) + AudioConstants::MAX_CODEC_NAME_LENGTH_ON_WIRE 
                                                             + AudioConstants::NETWORK_FRAME_BYTES_STEREO;
                        mixPacket = NLPacket::create(PacketType::MixedAudio, mixPacketBytes);

                        // pack sequence number
                        quint16 sequence = nodeData->getOutgoingSequenceNumber();
                        mixPacket->writePrimitive(sequence);

                        // write the codec
                        QString codecInPacket = nodeData->getCodecName();
                        mixPacket->writeString(codecInPacket);

                        QByteArray decodedBuffer(reinterpret_cast<char*>(_clampedSamples), AudioConstants::NETWORK_FRAME_BYTES_STEREO);
                        QByteArray encodedBuffer;
                        nodeData->encode(decodedBuffer, encodedBuffer);

                        // pack mixed audio samples
                        mixPacket->write(encodedBuffer.constData(), encodedBuffer.size());
                    } else {
                        int silentPacketBytes = sizeof(quint16) + sizeof(quint16) + AudioConstants::MAX_CODEC_NAME_LENGTH_ON_WIRE;
                        mixPacket = NLPacket::create(PacketType::SilentAudioFrame, silentPacketBytes);

                        // pack sequence number
                        quint16 sequence = nodeData->getOutgoingSequenceNumber();
                        mixPacket->writePrimitive(sequence);

                        // write the codec
                        QString codecInPacket = nodeData->getCodecName();
                        mixPacket->writeString(codecInPacket);

                        // pack number of silent audio samples
                        quint16 numSilentSamples = AudioConstants::NETWORK_FRAME_SAMPLES_STEREO;
                        mixPacket->writePrimitive(numSilentSamples);
                    }

                    // Send audio environment
                    sendAudioEnvironmentPacket(node);

                    // send mixed audio packet
                    nodeList->sendPacket(std::move(mixPacket), *node);
                    nodeData->incrementOutgoingMixedAudioSequenceNumber();

                    // send an audio stream stats packet to the client approximately every second
                    ++currentFrame;
                    currentFrame %= numFramesPerSecond;

                    if (nodeData->shouldSendStats(currentFrame)) {
                        nodeData->sendAudioStreamStatsPackets(node);
                    }

                    ++_sumListeners;
                }
            }
        });

        ++_numStatFrames;

        // since we're in a while loop, we need to pump Qt's event processing manually
        QCoreApplication::processEvents();

        if (_isFinished) {
            // at this point the audio-mixer is done
            // check if we have a deferred delete event to process (which we should once finished)
            QCoreApplication::sendPostedEvents(this, QEvent::DeferredDelete);
            break;
        }

        // advance the timestamp to when the next frame should be sent
        nextFrameTimestamp += std::chrono::microseconds(AudioConstants::NETWORK_FRAME_USECS);

        // sleep for the time remaining until the next frame, if any
        auto now = p_high_resolution_clock::now();
        timeToSleep = std::chrono::duration_cast<std::chrono::microseconds>(nextFrameTimestamp - now);

        std::this_thread::sleep_for(timeToSleep);
    }
}
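
The loop above paces itself with a fixed timestep: the deadline is advanced by exactly one frame period each iteration, so a frame that runs long is followed by a shorter sleep instead of letting drift accumulate. A minimal standalone sketch of the pattern, assuming a 10 ms frame period (runFrame and fixedTimestepLoop are hypothetical names, not from the codebase):

#include <chrono>
#include <thread>

// hypothetical per-frame work; the mixer would build and send packets here
void runFrame() {}

void fixedTimestepLoop(bool& finished) {
    using namespace std::chrono;
    const microseconds FRAME_PERIOD { 10000 };  // assumed 10 ms frame period

    auto nextDeadline = steady_clock::now();
    while (!finished) {
        runFrame();

        // advance the deadline by a fixed period so timing drift does not accumulate
        nextDeadline += FRAME_PERIOD;

        // sleep only for the time remaining; if the frame ran long, the duration
        // is negative and sleep_for returns immediately
        std::this_thread::sleep_for(nextDeadline - steady_clock::now());
    }
}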
Example No. 24
void AudioMixerSlave::addStream(AudioMixerClientData& listenerNodeData, const QUuid& sourceNodeID,
        const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd,
        bool throttle) {
    ++stats.totalMixes;

    // to reduce artifacts we call the HRTF functor for every source, even if throttled or silent;
    // this preserves the correct tail from the last mixed block and the correct spatialization of the next one

    // check if this is a server echo of a source back to itself
    bool isEcho = (&streamToAdd == &listeningNodeStream);

    glm::vec3 relativePosition = streamToAdd.getPosition() - listeningNodeStream.getPosition();

    float distance = glm::max(glm::length(relativePosition), EPSILON);
    float gain = computeGain(listenerNodeData, listeningNodeStream, streamToAdd, relativePosition, isEcho);
    float azimuth = isEcho ? 0.0f : computeAzimuth(listeningNodeStream, streamToAdd, relativePosition);
    const int HRTF_DATASET_INDEX = 1;

    if (!streamToAdd.lastPopSucceeded()) {
        bool forceSilentBlock = true;

        if (!streamToAdd.getLastPopOutput().isNull()) {
            bool isInjector = dynamic_cast<const InjectedAudioStream*>(&streamToAdd) != nullptr;

            // for an injector, just go silent - the injector has likely ended
            // for other inputs (microphone, etc.), repeat the last frame with a fade to avoid a harsh jump to silence
            if (!isInjector) {
                // calculate its fade factor, which depends on how many times it's already been repeated.
                float fadeFactor = calculateRepeatedFrameFadeFactor(streamToAdd.getConsecutiveNotMixedCount() - 1);
                if (fadeFactor > 0.0f) {
                    // apply the fadeFactor to the gain
                    gain *= fadeFactor;
                    forceSilentBlock = false;
                }
            }
        }

        if (forceSilentBlock) {
            // call renderSilent with a forced silent block to reduce artifacts
            // (this is not done for stereo streams since they do not go through the HRTF)
            if (!streamToAdd.isStereo() && !isEcho) {
                // get the existing listener-source HRTF object, or create a new one
                auto& hrtf = listenerNodeData.hrtfForStream(sourceNodeID, streamToAdd.getStreamIdentifier());

                static int16_t silentMonoBlock[AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL] = {};
                hrtf.renderSilent(silentMonoBlock, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                                  AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

                ++stats.hrtfSilentRenders;
            }

            return;
        }
    }

    // grab the stream from the ring buffer
    AudioRingBuffer::ConstIterator streamPopOutput = streamToAdd.getLastPopOutput();

    // stereo sources are not passed through HRTF
    if (streamToAdd.isStereo()) {
        for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; ++i) {
            _mixSamples[i] += float(streamPopOutput[i] * gain / AudioConstants::MAX_SAMPLE_VALUE);
        }

        ++stats.manualStereoMixes;
        return;
    }

    // echo sources are not passed through HRTF
    if (isEcho) {
        for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; i += 2) {
            auto monoSample = float(streamPopOutput[i / 2] * gain / AudioConstants::MAX_SAMPLE_VALUE);
            _mixSamples[i] += monoSample;
            _mixSamples[i + 1] += monoSample;
        }

        ++stats.manualEchoMixes;
        return;
    }

    // get the existing listener-source HRTF object, or create a new one
    auto& hrtf = listenerNodeData.hrtfForStream(sourceNodeID, streamToAdd.getStreamIdentifier());

    streamPopOutput.readSamples(_bufferSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

    if (streamToAdd.getLastPopOutputLoudness() == 0.0f) {
        // call renderSilent to reduce artifacts
        hrtf.renderSilent(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                          AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

        ++stats.hrtfSilentRenders;
        return;
    }

    if (throttle) {
        // call renderSilent with actual frame data and a gain of 0.0f to reduce artifacts
        hrtf.renderSilent(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, 0.0f,
                          AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

        ++stats.hrtfThrottleRenders;
        return;
    }

    hrtf.render(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

    ++stats.hrtfRenders;
}
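
Stereo and echo sources bypass the HRTF and are accumulated by hand: each int16 sample is normalized to float, scaled by the gain, and, for a mono echo, written to both output channels. A minimal sketch of that upmix in isolation (the function name and the 32767.0f normalizer are assumptions standing in for AudioConstants::MAX_SAMPLE_VALUE):

#include <cstdint>

// a minimal sketch of mono-to-stereo accumulation: normalize, apply gain,
// and add the same sample to the left and right channels
void mixMonoIntoStereo(const int16_t* monoIn, float* stereoMix, int numStereoSamples, float gain) {
    const float MAX_SAMPLE_VALUE = 32767.0f;  // assumed int16 full scale
    for (int i = 0; i < numStereoSamples; i += 2) {
        float monoSample = monoIn[i / 2] * gain / MAX_SAMPLE_VALUE;
        stereoMix[i] += monoSample;      // left
        stereoMix[i + 1] += monoSample;  // right
    }
}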
Example No. 25
bool AudioMixerSlave::prepareMix(const SharedNodePointer& listener) {
    AudioMixerClientData* listenerData = static_cast<AudioMixerClientData*>(listener->getLinkedData());
    AvatarAudioStream* listenerAudioStream = listenerData->getAvatarAudioStream();

    // if this listener has sent an invalid position, refuse to build a mix for it,
    // since there is no correct way to spatialize the sources
    if (!listenerAudioStream->hasValidPosition()) {
        return false;
    }

    // zero out the mix for this listener
    memset(_mixSamples, 0, sizeof(_mixSamples));

    bool isThrottling = _throttlingRatio > 0.0f;
    std::vector<std::pair<float, SharedNodePointer>> throttledNodes;

    typedef void (AudioMixerSlave::*MixFunctor)(
            AudioMixerClientData&, const QUuid&, const AvatarAudioStream&, const PositionalAudioStream&);
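    // MixFunctor is a pointer to a member function; it is invoked below as (this->*mixFunctor)(...)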
    auto forAllStreams = [&](const SharedNodePointer& node, AudioMixerClientData* nodeData, MixFunctor mixFunctor) {
        auto nodeID = node->getUUID();
        for (auto& streamPair : nodeData->getAudioStreams()) {
            auto nodeStream = streamPair.second;
            (this->*mixFunctor)(*listenerData, nodeID, *listenerAudioStream, *nodeStream);
        }
    };

#ifdef HIFI_AUDIO_MIXER_DEBUG
    auto mixStart = p_high_resolution_clock::now();
#endif

    std::for_each(_begin, _end, [&](const SharedNodePointer& node) {
        AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
        if (!nodeData) {
            return;
        }

        if (*node == *listener) {
            // only mix the echo, if requested
            for (auto& streamPair : nodeData->getAudioStreams()) {
                auto nodeStream = streamPair.second;
                if (nodeStream->shouldLoopbackForNode()) {
                    mixStream(*listenerData, node->getUUID(), *listenerAudioStream, *nodeStream);
                }
            }
        } else if (!listenerData->shouldIgnore(listener, node, _frame)) {
            if (!isThrottling) {
                forAllStreams(node, nodeData, &AudioMixerSlave::mixStream);
            } else {
                auto nodeID = node->getUUID();

                // compute the node's max relative volume
                float nodeVolume = 0.0f;
                for (auto& streamPair : nodeData->getAudioStreams()) {
                    auto nodeStream = streamPair.second;

                    // approximate the gain
                    glm::vec3 relativePosition = nodeStream->getPosition() - listenerAudioStream->getPosition();
                    float gain = approximateGain(*listenerAudioStream, *nodeStream, relativePosition);

                    // modify by hrtf gain adjustment
                    auto& hrtf = listenerData->hrtfForStream(nodeID, nodeStream->getStreamIdentifier());
                    gain *= hrtf.getGainAdjustment();

                    auto streamVolume = nodeStream->getLastPopOutputTrailingLoudness() * gain;
                    nodeVolume = std::max(streamVolume, nodeVolume);
                }

                // max-heapify the nodes by relative volume
                throttledNodes.push_back({ nodeVolume, node });
                std::push_heap(throttledNodes.begin(), throttledNodes.end());
            }
        }
    });

    if (isThrottling) {
        // pop the loudest nodes off the heap and mix their streams
        int numToRetain = (int)(std::distance(_begin, _end) * (1 - _throttlingRatio));
        for (int i = 0; i < numToRetain; i++) {
            if (throttledNodes.empty()) {
                break;
            }

            std::pop_heap(throttledNodes.begin(), throttledNodes.end());

            auto& node = throttledNodes.back().second;
            AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
            forAllStreams(node, nodeData, &AudioMixerSlave::mixStream);

            throttledNodes.pop_back();
        }

        // throttle the remaining nodes' streams
        for (const std::pair<float, SharedNodePointer>& nodePair : throttledNodes) {
            auto& node = nodePair.second;
            AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
            forAllStreams(node, nodeData, &AudioMixerSlave::throttleStream);
        }
    }

#ifdef HIFI_AUDIO_MIXER_DEBUG
    auto mixEnd = p_high_resolution_clock::now();
    auto mixTime = std::chrono::duration_cast<std::chrono::nanoseconds>(mixEnd - mixStart);
    stats.mixTime += mixTime.count();
#endif

    // check for silent audio before limiting
    // limiting uses a dither and can only guarantee abs(sample) <= 1
    bool hasAudio = false;
    for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; ++i) {
        if (_mixSamples[i] != 0.0f) {
            hasAudio = true;
            break;
        }
    }

    // use the per listener AudioLimiter to render the mixed data
    listenerData->audioLimiter.render(_mixSamples, _bufferSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

    return hasAudio;
}
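
The throttling pass above amounts to a partial selection: every candidate node is pushed onto a max-heap keyed by its approximate volume, the loudest numToRetain nodes are popped and mixed in full, and whatever is still on the heap is throttled. A minimal sketch of the selection step on its own (the integer node ids and the mixNode/throttleNode calls are hypothetical placeholders):

#include <algorithm>
#include <utility>
#include <vector>

// a minimal sketch of heap-based throttling; pairs compare lexicographically,
// so the first element (volume) drives the heap order
void selectLoudest(std::vector<std::pair<float, int>> nodes, int numToRetain) {
    std::make_heap(nodes.begin(), nodes.end());  // max-heap by volume

    while (numToRetain-- > 0 && !nodes.empty()) {
        std::pop_heap(nodes.begin(), nodes.end());  // move the loudest to the back
        // mixNode(nodes.back().second);            // hypothetical: mix this node in full
        nodes.pop_back();
    }

    // every node still in the vector takes the throttled (silent render) path
    // for (const auto& nodePair : nodes) { throttleNode(nodePair.second); }  // hypothetical
}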