Example #1
void AudioMixer::handleMuteEnvironmentPacket(QSharedPointer<ReceivedMessage> message, SharedNodePointer sendingNode) {
    auto nodeList = DependencyManager::get<NodeList>();

    if (sendingNode->isAllowedEditor()) {
        glm::vec3 position;
        float radius;

        auto newPacket = NLPacket::create(PacketType::MuteEnvironment, sizeof(position) + sizeof(radius));

        // read the position and radius from the sent packet
        message->readPrimitive(&position);
        message->readPrimitive(&radius);

        // write them to our packet
        newPacket->writePrimitive(position);
        newPacket->writePrimitive(radius);

        nodeList->eachNode([&](const SharedNodePointer& node){
            if (node->getType() == NodeType::Agent && node->getActiveSocket() &&
                node->getLinkedData() && node != sendingNode) {
                nodeList->sendUnreliablePacket(*newPacket, *node);
            }
        });
    }
}
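All of these examples share the same core shape: fetch the NodeList through DependencyManager, then pass eachNode a lambda that filters and acts on every connected node. A minimal sketch of that pattern, using only APIs that already appear in the examples on this page (the packet type and its empty payload are placeholders):

auto nodeList = DependencyManager::get<NodeList>();
auto packet = NLPacket::create(PacketType::MuteEnvironment, 0);

nodeList->eachNode([&](const SharedNodePointer& node) {
    // typical filter: only active Agent nodes that carry linked data
    if (node->getType() == NodeType::Agent && node->getActiveSocket() && node->getLinkedData()) {
        nodeList->sendUnreliablePacket(*packet, *node);
    }
});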
Example #2
void AudioMixer::sendStatsPacket() {
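    // NOTE: statsObject is static, so entries persist across calls; the keys written below are simply overwritten each time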
    static QJsonObject statsObject;

    statsObject["useDynamicJitterBuffers"] = _streamSettings._dynamicJitterBuffers;
    statsObject["trailing_sleep_percentage"] = _trailingSleepRatio * 100.0f;
    statsObject["performance_throttling_ratio"] = _performanceThrottlingRatio;

    statsObject["avg_listeners_per_frame"] = (float) _sumListeners / (float) _numStatFrames;

    QJsonObject mixStats;
    mixStats["%_hrtf_mixes"] = percentageForMixStats(_hrtfRenders);
    mixStats["%_hrtf_silent_mixes"] = percentageForMixStats(_hrtfSilentRenders);
    mixStats["%_hrtf_struggle_mixes"] = percentageForMixStats(_hrtfStruggleRenders);
    mixStats["%_manual_stereo_mixes"] = percentageForMixStats(_manualStereoMixes);
    mixStats["%_manual_echo_mixes"] = percentageForMixStats(_manualEchoMixes);

    mixStats["total_mixes"] = _totalMixes;
    mixStats["avg_mixes_per_block"] = _totalMixes / _numStatFrames;

    statsObject["mix_stats"] = mixStats;

    _sumListeners = 0;
    _hrtfRenders = 0;
    _hrtfSilentRenders = 0;
    _hrtfStruggleRenders = 0;
    _manualStereoMixes = 0;
    _manualEchoMixes = 0;
    _totalMixes = 0;
    _numStatFrames = 0;

    // add stats for each listener
    auto nodeList = DependencyManager::get<NodeList>();
    QJsonObject listenerStats;

    nodeList->eachNode([&](const SharedNodePointer& node) {
        AudioMixerClientData* clientData = static_cast<AudioMixerClientData*>(node->getLinkedData());
        if (clientData) {
            QJsonObject nodeStats;
            QString uuidString = uuidStringWithoutCurlyBraces(node->getUUID());

            nodeStats["outbound_kbps"] = node->getOutboundBandwidth();
            nodeStats[USERNAME_UUID_REPLACEMENT_STATS_KEY] = uuidString;

            nodeStats["jitter"] = clientData->getAudioStreamStats();

            listenerStats[uuidString] = nodeStats;
        }
    });

    // add the listeners object to the root object
    statsObject["z_listeners"] = listenerStats;

    // send off the stats packets
    ThreadedAssignment::addPacketStatsAndSendStatsPacket(statsObject);
}
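Note that the accumulators (_sumListeners, the mix counters, and _numStatFrames) are reset after each report, so every stats packet covers only the interval since the previous one.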
Example #3
void AudioMixer::handleNodeKilled(SharedNodePointer killedNode) {
    // enumerate the connected listeners to remove HRTF objects for the disconnected node
    auto nodeList = DependencyManager::get<NodeList>();

    nodeList->eachNode([&killedNode](const SharedNodePointer& node) {
        auto clientData = dynamic_cast<AudioMixerClientData*>(node->getLinkedData());
        if (clientData) {
            clientData->removeHRTFsForNode(killedNode->getUUID());
        }
    });
}
Example #4
void AudioMixer::removeHRTFsForFinishedInjector(const QUuid& streamID) {
    auto injectorClientData = qobject_cast<AudioMixerClientData*>(sender());
    if (injectorClientData) {
        // enumerate the connected listeners to remove HRTF objects for the disconnected injector
        auto nodeList = DependencyManager::get<NodeList>();

        nodeList->eachNode([injectorClientData, &streamID](const SharedNodePointer& node){
            auto listenerClientData = dynamic_cast<AudioMixerClientData*>(node->getLinkedData());
            if (listenerClientData) {
                listenerClientData->removeHRTFForStream(injectorClientData->getNodeID(), streamID);
            }
        });
    }
}
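A note on the guard: QObject::sender() only returns a valid pointer when the method runs as a slot activated by a signal; in any other context it returns null, the qobject_cast yields null, and the method safely does nothing.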
Example #5
void LimitedNodeList::eraseAllNodes() {
    qCDebug(networking) << "Clearing the NodeList. Deleting all nodes in list.";

    QSet<SharedNodePointer> killedNodes;
    eachNode([&killedNodes](const SharedNodePointer& node) {
        killedNodes.insert(node);
    });

    // iterate the current nodes, emit that they are dying and remove them from the hash
    _nodeMutex.lockForWrite();
    _nodeHash.clear();
    _nodeMutex.unlock();

    foreach(const SharedNodePointer& killedNode, killedNodes) {
        handleNodeKill(killedNode);
    }
}
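The nodes are snapshotted into killedNodes before the hash is cleared, so handleNodeKill runs after the write lock on the node hash has been released rather than while it is held.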
Example #6
void AudioMixer::handleMuteEnvironmentPacket(QSharedPointer<NLPacket> packet, SharedNodePointer sendingNode) {
    auto nodeList = DependencyManager::get<NodeList>();
    
    if (sendingNode->getCanAdjustLocks()) {
        auto newPacket = NLPacket::create(PacketType::MuteEnvironment, packet->getPayloadSize());
        // Copy payload
        newPacket->write(packet->getPayload(), packet->getPayloadSize());

        nodeList->eachNode([&](const SharedNodePointer& node){
            if (node->getType() == NodeType::Agent && node->getActiveSocket() &&
                node->getLinkedData() && node != sendingNode) {
                nodeList->sendPacket(std::move(newPacket), *node);
            }
        });
    }
}
Example #7
// FIXME - make these stats relevant
void MessagesMixer::sendStatsPacket() {
    QJsonObject statsObject;
    QJsonObject messagesObject;
    auto nodeList = DependencyManager::get<NodeList>();
    // add stats for each listener
    nodeList->eachNode([&](const SharedNodePointer& node) {
        QJsonObject messagesStats;

        // add the key to ask the domain-server for a username replacement, if it has it
        messagesStats[USERNAME_UUID_REPLACEMENT_STATS_KEY] = uuidStringWithoutCurlyBraces(node->getUUID());
        messagesStats["outbound_kbps"] = node->getOutboundBandwidth();
        messagesStats["inbound_kbps"] = node->getInboundBandwidth();

        messagesObject[uuidStringWithoutCurlyBraces(node->getUUID())] = messagesStats;
    });

    statsObject["messages"] = messagesObject;
    ThreadedAssignment::addPacketStatsAndSendStatsPacket(statsObject);
}
Example #8
void AssignmentClientMonitor::checkSpares() {
    auto nodeList = DependencyManager::get<NodeList>();
    QUuid aSpareId = "";
    unsigned int spareCount = 0;
    unsigned int totalCount = 0;

    nodeList->removeSilentNodes();

    nodeList->eachNode([&](const SharedNodePointer& node) {
        AssignmentClientChildData* childData = static_cast<AssignmentClientChildData*>(node->getLinkedData());
        totalCount++;
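        // NOTE: childData is assumed non-null here; every child node is expected to carry AssignmentClientChildData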
        if (childData->getChildType() == Assignment::Type::AllTypes) {
            ++spareCount;
            aSpareId = node->getUUID();
        }
    });

    // Spawn or kill children, as needed.  If --min or --max weren't specified, allow the child count
    // to drift up or down as far as needed.

    if (spareCount < 1 || totalCount < _minAssignmentClientForks) {
        if (!_maxAssignmentClientForks || totalCount < _maxAssignmentClientForks) {
            spawnChildClient();
        }
    }

    if (spareCount > 1) {
        if (!_minAssignmentClientForks || totalCount > _minAssignmentClientForks) {
            // kill aSpareId
            qDebug() << "asking child" << aSpareId << "to exit.";
            SharedNodePointer childNode = nodeList->nodeWithUUID(aSpareId);
            childNode->activateLocalSocket();

            auto diePacket = NLPacket::create(PacketType::StopNode, 0);
            nodeList->sendPacket(std::move(diePacket), *childNode);
        }
    }
}
Example #9
void ScriptEngine::run() {
    // TODO: can we add a short circuit for _stoppingAllScripts here? What does it mean to not start running if
    // we're in the process of stopping?

    if (!_isInitialized) {
        init();
    }
    _isRunning = true;
    _isFinished = false;
    emit runningStateChanged();

    QScriptValue result = evaluate(_scriptContents);

    QElapsedTimer startTime;
    startTime.start();

    int thisFrame = 0;

    auto nodeList = DependencyManager::get<NodeList>();
    auto entityScriptingInterface = DependencyManager::get<EntityScriptingInterface>();

    qint64 lastUpdate = usecTimestampNow();

    while (!_isFinished) {
        int usecToSleep = (thisFrame++ * SCRIPT_DATA_CALLBACK_USECS) - startTime.nsecsElapsed() / 1000; // nsec to usec
        if (usecToSleep > 0) {
            usleep(usecToSleep);
        }

        if (_isFinished) {
            break;
        }

        QCoreApplication::processEvents();

        if (_isFinished) {
            break;
        }

        if (!_isFinished && entityScriptingInterface->getEntityPacketSender()->serversExist()) {
            // release the queue of edit entity messages.
            entityScriptingInterface->getEntityPacketSender()->releaseQueuedMessages();

            // since we're in non-threaded mode, call process so that the packets are sent
            if (!entityScriptingInterface->getEntityPacketSender()->isThreaded()) {
                entityScriptingInterface->getEntityPacketSender()->process();
            }
        }

        if (!_isFinished && _isAvatar && _avatarData) {

            const int SCRIPT_AUDIO_BUFFER_SAMPLES = floor(((SCRIPT_DATA_CALLBACK_USECS * AudioConstants::SAMPLE_RATE)
                                                           / (1000 * 1000)) + 0.5);
            const int SCRIPT_AUDIO_BUFFER_BYTES = SCRIPT_AUDIO_BUFFER_SAMPLES * sizeof(int16_t);

            QByteArray avatarByteArray = _avatarData->toByteArray();
            auto avatarPacket = NLPacket::create(PacketType::AvatarData, avatarByteArray.size());

            avatarPacket->write(avatarByteArray);

            nodeList->broadcastToNodes(std::move(avatarPacket), NodeSet() << NodeType::AvatarMixer);

            if (_isListeningToAudioStream || _avatarSound) {
                // if we have an avatar audio stream then send it out to our audio-mixer
                bool silentFrame = true;

                int16_t numAvailableSamples = SCRIPT_AUDIO_BUFFER_SAMPLES;
                const int16_t* nextSoundOutput = NULL;

                if (_avatarSound) {

                    const QByteArray& soundByteArray = _avatarSound->getByteArray();
                    nextSoundOutput = reinterpret_cast<const int16_t*>(soundByteArray.data()
                                                                       + _numAvatarSoundSentBytes);

                    int numAvailableBytes = (soundByteArray.size() - _numAvatarSoundSentBytes) > SCRIPT_AUDIO_BUFFER_BYTES
                        ? SCRIPT_AUDIO_BUFFER_BYTES
                        : soundByteArray.size() - _numAvatarSoundSentBytes;
                    numAvailableSamples = numAvailableBytes / sizeof(int16_t);


                    // check whether all of the samples to be sent this frame are silence
                    for (int i = 0; i < numAvailableSamples; ++i) {
                        if (nextSoundOutput[i] != 0) {
                            silentFrame = false;
                            break;
                        }
                    }

                    _numAvatarSoundSentBytes += numAvailableBytes;
                    if (_numAvatarSoundSentBytes == soundByteArray.size()) {
                        // we're done with this sound object - so set our pointer back to NULL
                        // and our sent bytes back to zero
                        _avatarSound = NULL;
                        _numAvatarSoundSentBytes = 0;
                    }
                }

                auto audioPacket = NLPacket::create(silentFrame
                                                    ? PacketType::SilentAudioFrame
                                                    : PacketType::MicrophoneAudioNoEcho);

                // seek past the sequence number, will be packed when destination node is known
                audioPacket->seek(sizeof(quint16));

                if (silentFrame) {
                    if (!_isListeningToAudioStream) {
                        // if we have a silent frame and we're not listening then just send nothing and break out of here
                        break;
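                        // NOTE: this break exits the enclosing while (!_isFinished) run loop,
                        // not just the audio-sending block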
                    }

                    // write the number of silent samples so the audio-mixer can uphold timing
                    audioPacket->writePrimitive(SCRIPT_AUDIO_BUFFER_SAMPLES);

                    // use the orientation and position of this avatar for the source of this audio
                    audioPacket->writePrimitive(_avatarData->getPosition());
                    glm::quat headOrientation = _avatarData->getHeadOrientation();
                    audioPacket->writePrimitive(headOrientation);

                } else if (nextSoundOutput) {
                    // assume scripted avatar audio is mono and set channel flag to zero
                    audioPacket->writePrimitive((quint8) 0);

                    // use the orientation and position of this avatar for the source of this audio
                    audioPacket->writePrimitive(_avatarData->getPosition());
                    glm::quat headOrientation = _avatarData->getHeadOrientation();
                    audioPacket->writePrimitive(headOrientation);

                    // write the raw audio data
                    audioPacket->write(reinterpret_cast<const char*>(nextSoundOutput), numAvailableSamples * sizeof(int16_t));
                }

                // write audio packet to AudioMixer nodes
                auto nodeList = DependencyManager::get<NodeList>();
                nodeList->eachNode([this, &nodeList, &audioPacket](const SharedNodePointer& node){
                    // only send to nodes of type AudioMixer
                    if (node->getType() == NodeType::AudioMixer) {
                        // pack sequence number
                        quint16 sequence = _outgoingScriptAudioSequenceNumbers[node->getUUID()]++;
                        audioPacket->seek(0);
                        audioPacket->writePrimitive(sequence);

                        // send audio packet
                        nodeList->sendUnreliablePacket(*audioPacket, *node);
                    }
                });
            }
        }

        qint64 now = usecTimestampNow();
        float deltaTime = (float) (now - lastUpdate) / (float) USECS_PER_SECOND;

        if (hasUncaughtException()) {
            int line = uncaughtExceptionLineNumber();
            qCDebug(scriptengine) << "Uncaught exception at (" << _fileNameString << ") line" << line << ":" << uncaughtException().toString();
            emit errorMessage("Uncaught exception at (" + _fileNameString + ") line" + QString::number(line) + ":" + uncaughtException().toString());
            clearExceptions();
        }

        if (!_isFinished) {
            emit update(deltaTime);
        }
        lastUpdate = now;

    }

    stopAllTimers(); // make sure all our timers are stopped if the script is ending
    emit scriptEnding();

    // kill the avatar identity timer
    delete _avatarIdentityTimer;

    if (entityScriptingInterface->getEntityPacketSender()->serversExist()) {
        // release the queue of edit entity messages.
        entityScriptingInterface->getEntityPacketSender()->releaseQueuedMessages();

        // since we're in non-threaded mode, call process so that the packets are sent
        if (!entityScriptingInterface->getEntityPacketSender()->isThreaded()) {
            // wait here till the edit packet sender is completely done sending
            while (entityScriptingInterface->getEntityPacketSender()->hasPacketsToSend()) {
                entityScriptingInterface->getEntityPacketSender()->process();
                QCoreApplication::processEvents();
            }
        } else {
            // FIXME - do we need to have a similar "wait here" loop for non-threaded packet senders?
        }
    }

    emit finished(_fileNameString);

    _isRunning = false;
    emit runningStateChanged();

    emit doneRunning();

    _doneRunningThisScript = true;
}
Example #10
void OctreeHeadlessViewer::queryOctree() {
    char serverType = getMyNodeType();
    PacketType packetType = getMyQueryMessageType();
    NodeToJurisdictionMap& jurisdictions = *_jurisdictionListener->getJurisdictions();

    bool wantExtraDebugging = false;

    if (wantExtraDebugging) {
        qCDebug(octree) << "OctreeHeadlessViewer::queryOctree() _jurisdictionListener=" << _jurisdictionListener;
        qCDebug(octree) << "---------------";
        qCDebug(octree) << "_jurisdictionListener=" << _jurisdictionListener;
        qCDebug(octree) << "Jurisdictions...";
        jurisdictions.lockForRead();
        for (NodeToJurisdictionMapIterator i = jurisdictions.begin(); i != jurisdictions.end(); ++i) {
            qCDebug(octree) << i.key() << ": " << &i.value();
        }
        jurisdictions.unlock();
        qCDebug(octree) << "---------------";
    }

    // These will be the same for all servers, so we can set them up once and then reuse for each server we send to.
    _octreeQuery.setWantLowResMoving(true);
    _octreeQuery.setWantColor(true);
    _octreeQuery.setWantDelta(true);
    _octreeQuery.setWantOcclusionCulling(false);
    _octreeQuery.setWantCompression(true); // TODO: should be on by default

    _octreeQuery.setCameraPosition(_viewFrustum.getPosition());
    _octreeQuery.setCameraOrientation(_viewFrustum.getOrientation());
    _octreeQuery.setCameraFov(_viewFrustum.getFieldOfView());
    _octreeQuery.setCameraAspectRatio(_viewFrustum.getAspectRatio());
    _octreeQuery.setCameraNearClip(_viewFrustum.getNearClip());
    _octreeQuery.setCameraFarClip(_viewFrustum.getFarClip());
    _octreeQuery.setCameraEyeOffsetPosition(glm::vec3());

    _octreeQuery.setOctreeSizeScale(getVoxelSizeScale());
    _octreeQuery.setBoundaryLevelAdjust(getBoundaryLevelAdjust());

    // Iterate all of the nodes, and get a count of how many voxel servers we have...
    int totalServers = 0;
    int inViewServers = 0;
    int unknownJurisdictionServers = 0;

    DependencyManager::get<NodeList>()->eachNode([&](const SharedNodePointer& node){
        // only send to the NodeTypes that are serverType
        if (node->getActiveSocket() && node->getType() == serverType) {
            totalServers++;

            // get the server bounds for this server
            QUuid nodeUUID = node->getUUID();

            // if we haven't heard from this voxel server, go ahead and send it a query, so we
            // can get the jurisdiction...
            jurisdictions.lockForRead();
            if (jurisdictions.find(nodeUUID) == jurisdictions.end()) {
                jurisdictions.unlock();
                unknownJurisdictionServers++;
            } else {
                const JurisdictionMap& map = (jurisdictions)[nodeUUID];

                unsigned char* rootCode = map.getRootOctalCode();

                if (rootCode) {
                    VoxelPositionSize rootDetails;
                    voxelDetailsForCode(rootCode, rootDetails);
                    jurisdictions.unlock();
                    AACube serverBounds(glm::vec3(rootDetails.x, rootDetails.y, rootDetails.z), rootDetails.s);

                    ViewFrustum::location serverFrustumLocation = _viewFrustum.cubeInFrustum(serverBounds);

                    if (serverFrustumLocation != ViewFrustum::OUTSIDE) {
                        inViewServers++;
                    }
                } else {
                    jurisdictions.unlock();
                }
            }
        }
    });

    if (wantExtraDebugging) {
        qCDebug(octree, "Servers: total %d, in view %d, unknown jurisdiction %d",
            totalServers, inViewServers, unknownJurisdictionServers);
    }

    int perServerPPS = 0;
    const int SMALL_BUDGET = 10;
    int perUnknownServer = SMALL_BUDGET;
    int totalPPS = getMaxPacketsPerSecond();

    // determine PPS based on number of servers
    if (inViewServers >= 1) {
        // set our preferred PPS to be exactly evenly divided among all of the voxel servers... and allocate 1 PPS
        // for each unknown jurisdiction server
        perServerPPS = (totalPPS / inViewServers) - (unknownJurisdictionServers * perUnknownServer);
    } else {
        if (unknownJurisdictionServers > 0) {
            perUnknownServer = (totalPPS / unknownJurisdictionServers);
        }
    }

    if (wantExtraDebugging) {
        qCDebug(octree, "perServerPPS: %d perUnknownServer: %d", perServerPPS, perUnknownServer);
    }

    auto nodeList = DependencyManager::get<NodeList>();
    nodeList->eachNode([&](const SharedNodePointer& node){
        // only send to the NodeTypes that are serverType
        if (node->getActiveSocket() && node->getType() == serverType) {

            // get the server bounds for this server
            QUuid nodeUUID = node->getUUID();

            bool inView = false;
            bool unknownView = false;

            // if we haven't heard from this voxel server, go ahead and send it a query, so we
            // can get the jurisdiction...
            jurisdictions.lockForRead();
            if (jurisdictions.find(nodeUUID) == jurisdictions.end()) {
                jurisdictions.unlock();
                unknownView = true; // assume it's in view
                if (wantExtraDebugging) {
                    qCDebug(octree) << "no known jurisdiction for node " << *node << ", assume it's visible.";
                }
            } else {
                const JurisdictionMap& map = (jurisdictions)[nodeUUID];

                unsigned char* rootCode = map.getRootOctalCode();

                if (rootCode) {
                    VoxelPositionSize rootDetails;
                    voxelDetailsForCode(rootCode, rootDetails);
                    jurisdictions.unlock();
                    AACube serverBounds(glm::vec3(rootDetails.x, rootDetails.y, rootDetails.z), rootDetails.s);

                    ViewFrustum::location serverFrustumLocation = _viewFrustum.cubeInFrustum(serverBounds);
                    if (serverFrustumLocation != ViewFrustum::OUTSIDE) {
                        inView = true;
                    } else {
                        inView = false;
                    }
                } else {
                    jurisdictions.unlock();
                    if (wantExtraDebugging) {
                        qCDebug(octree) << "Jurisdiction without RootCode for node " << *node << ". That's unusual!";
                    }
                }
            }

            if (inView) {
                _octreeQuery.setMaxQueryPacketsPerSecond(perServerPPS);
                if (wantExtraDebugging) {
                    qCDebug(octree) << "inView for node " << *node << ", give it budget of " << perServerPPS;
                }
            } else if (unknownView) {
                if (wantExtraDebugging) {
                    qCDebug(octree) << "no known jurisdiction for node " << *node << ", give it budget of "
                    << perUnknownServer << " to send us jurisdiction.";
                }

                // set the query's position/orientation to be degenerate in a manner that will get the scene quickly
                // If there's only one server, then don't do this, and just let the normal voxel query pass through
                // as expected... this way, we will actually get a valid scene if there is one to be seen
                if (totalServers > 1) {
                    _octreeQuery.setCameraPosition(glm::vec3(-0.1,-0.1,-0.1));
                    const glm::quat OFF_IN_NEGATIVE_SPACE = glm::quat(-0.5, 0, -0.5, 1.0);
                    _octreeQuery.setCameraOrientation(OFF_IN_NEGATIVE_SPACE);
                    _octreeQuery.setCameraNearClip(0.1f);
                    _octreeQuery.setCameraFarClip(0.1f);
                    if (wantExtraDebugging) {
                        qCDebug(octree) << "Using 'minimal' camera position for node" << *node;
                    }
                } else {
                    if (wantExtraDebugging) {
                        qCDebug(octree) << "Using regular camera position for node" << *node;
                    }
                }
                _octreeQuery.setMaxQueryPacketsPerSecond(perUnknownServer);
            } else {
                _octreeQuery.setMaxQueryPacketsPerSecond(0);
            }

            // setup the query packet
            auto queryPacket = NLPacket::create(packetType);
            _octreeQuery.getBroadcastData(reinterpret_cast<unsigned char*>(queryPacket->getPayload()));

            // ask the NodeList to send it
            nodeList->sendPacket(std::move(queryPacket), *node);
        }
    });
}
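Both passes over the jurisdictions map in this example follow the same lock discipline: lockForRead() before the lookup, then an explicit unlock() on every branch, including the early ones, so the read lock is never held across the frustum math or the packet send.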
Example #11
void AudioMixer::run() {

    ThreadedAssignment::commonInit(AUDIO_MIXER_LOGGING_TARGET_NAME, NodeType::AudioMixer);

    auto nodeList = DependencyManager::get<NodeList>();

    nodeList->addNodeTypeToInterestSet(NodeType::Agent);

    nodeList->linkedDataCreateCallback = [](Node* node) {
        node->setLinkedData(new AudioMixerClientData());
    };

    // wait until we have the domain-server settings, otherwise we bail
    DomainHandler& domainHandler = nodeList->getDomainHandler();

    qDebug() << "Waiting for domain settings from domain-server.";

    // block until we get the settingsRequestComplete signal
    QEventLoop loop;
    connect(&domainHandler, &DomainHandler::settingsReceived, &loop, &QEventLoop::quit);
    connect(&domainHandler, &DomainHandler::settingsReceiveFail, &loop, &QEventLoop::quit);
    domainHandler.requestDomainSettings();
    loop.exec();
    
    if (domainHandler.getSettingsObject().isEmpty()) {
        qDebug() << "Failed to retreive settings object from domain-server. Bailing on assignment.";
        setFinished(true);
        return;
    }

    const QJsonObject& settingsObject = domainHandler.getSettingsObject();

    // check the settings object to see if we have anything we can parse out
    parseSettingsObject(settingsObject);

    int nextFrame = 0;
    QElapsedTimer timer;
    timer.start();

    int usecToSleep = AudioConstants::NETWORK_FRAME_USECS;

    const int TRAILING_AVERAGE_FRAMES = 100;
    int framesSinceCutoffEvent = TRAILING_AVERAGE_FRAMES;

    while (!_isFinished) {
        const float STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.10f;
        const float BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.20f;

        const float RATIO_BACK_OFF = 0.02f;

        const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES;
        const float PREVIOUS_FRAMES_RATIO = 1.0f - CURRENT_FRAME_RATIO;

        if (usecToSleep < 0) {
            usecToSleep = 0;
        }

        _trailingSleepRatio = (PREVIOUS_FRAMES_RATIO * _trailingSleepRatio)
            + (usecToSleep * CURRENT_FRAME_RATIO / (float) AudioConstants::NETWORK_FRAME_USECS);

        float lastCutoffRatio = _performanceThrottlingRatio;
        bool hasRatioChanged = false;

        if (framesSinceCutoffEvent >= TRAILING_AVERAGE_FRAMES) {
            if (_trailingSleepRatio <= STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD) {
                // we're struggling - change our min required loudness to reduce some load
                _performanceThrottlingRatio = _performanceThrottlingRatio + (0.5f * (1.0f - _performanceThrottlingRatio));

                qDebug() << "Mixer is struggling, sleeping" << _trailingSleepRatio * 100 << "% of frame time. Old cutoff was"
                    << lastCutoffRatio << "and is now" << _performanceThrottlingRatio;
                hasRatioChanged = true;
            } else if (_trailingSleepRatio >= BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD && _performanceThrottlingRatio != 0) {
                // we've recovered and can back off the required loudness
                _performanceThrottlingRatio = _performanceThrottlingRatio - RATIO_BACK_OFF;

                if (_performanceThrottlingRatio < 0) {
                    _performanceThrottlingRatio = 0;
                }

                qDebug() << "Mixer is recovering, sleeping" << _trailingSleepRatio * 100 << "% of frame time. Old cutoff was"
                    << lastCutoffRatio << "and is now" << _performanceThrottlingRatio;
                hasRatioChanged = true;
            }

            if (hasRatioChanged) {
                // set our min audibility threshold from the new ratio
                _minAudibilityThreshold = LOUDNESS_TO_DISTANCE_RATIO / (2.0f * (1.0f - _performanceThrottlingRatio));
                qDebug() << "Minimum audability required to be mixed is now" << _minAudibilityThreshold;

                framesSinceCutoffEvent = 0;
            }
        }

        if (!hasRatioChanged) {
            ++framesSinceCutoffEvent;
        }

        quint64 now = usecTimestampNow();
        if (now - _lastPerSecondCallbackTime > USECS_PER_SECOND) {
            perSecondActions();
            _lastPerSecondCallbackTime = now;
        }

        nodeList->eachNode([&](const SharedNodePointer& node) {

            if (node->getLinkedData()) {
                AudioMixerClientData* nodeData = (AudioMixerClientData*)node->getLinkedData();

                // this function will attempt to pop a frame from each audio stream.
                // a pointer to the popped data is stored as a member in InboundAudioStream.
                // That's how the popped audio data will be read for mixing (but only if the pop was successful)
                nodeData->checkBuffersBeforeFrameSend();

                // if the stream should be muted, send mute packet
                if (nodeData->getAvatarAudioStream()
                    && shouldMute(nodeData->getAvatarAudioStream()->getQuietestFrameLoudness())) {
                    auto mutePacket = NLPacket::create(PacketType::NoisyMute, 0);
                    nodeList->sendPacket(std::move(mutePacket), *node);
                }

                if (node->getType() == NodeType::Agent && node->getActiveSocket()
                    && nodeData->getAvatarAudioStream()) {

                    int streamsMixed = prepareMixForListeningNode(node.data());

                    std::unique_ptr<NLPacket> mixPacket;

                    if (streamsMixed > 0) {
                        int mixPacketBytes = sizeof(quint16) + AudioConstants::NETWORK_FRAME_BYTES_STEREO;
                        mixPacket = NLPacket::create(PacketType::MixedAudio, mixPacketBytes);

                        // pack sequence number
                        quint16 sequence = nodeData->getOutgoingSequenceNumber();
                        mixPacket->writePrimitive(sequence);

                        // pack mixed audio samples
                        mixPacket->write(reinterpret_cast<char*>(_mixSamples),
                                         AudioConstants::NETWORK_FRAME_BYTES_STEREO);
                    } else {
                        int silentPacketBytes = sizeof(quint16) + sizeof(quint16);
                        mixPacket = NLPacket::create(PacketType::SilentAudioFrame, silentPacketBytes);

                        // pack sequence number
                        quint16 sequence = nodeData->getOutgoingSequenceNumber();
                        mixPacket->writePrimitive(sequence);

                        // pack number of silent audio samples
                        quint16 numSilentSamples = AudioConstants::NETWORK_FRAME_SAMPLES_STEREO;
                        mixPacket->writePrimitive(numSilentSamples);
                    }

                    // Send audio environment
                    sendAudioEnvironmentPacket(node);

                    // send mixed audio packet
                    nodeList->sendPacket(std::move(mixPacket), *node);
                    nodeData->incrementOutgoingMixedAudioSequenceNumber();

                    // send an audio stream stats packet if it's time
                    if (_sendAudioStreamStats) {
                        nodeData->sendAudioStreamStatsPackets(node);
                        _sendAudioStreamStats = false;
                    }

                    ++_sumListeners;
                }
            }
        });

        ++_numStatFrames;

        // since we're a while loop we need to help Qt's event processing
        QCoreApplication::processEvents();

        if (_isFinished) {
            // at this point the audio-mixer is done
            // check if we have a deferred delete event to process (which we should once finished)
            QCoreApplication::sendPostedEvents(this, QEvent::DeferredDelete);
            break;
        }

        usecToSleep = (++nextFrame * AudioConstants::NETWORK_FRAME_USECS) - timer.nsecsElapsed() / 1000; // ns to us

        if (usecToSleep > 0) {
            usleep(usecToSleep);
        }
    }
}
Example #12
void AudioMixer::sendStatsPacket() {
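    // as in Example #2, statsObject's static lifetime means entries persist between calls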
    static QJsonObject statsObject;

    statsObject["useDynamicJitterBuffers"] = _streamSettings._dynamicJitterBuffers;
    statsObject["trailing_sleep_percentage"] = _trailingSleepRatio * 100.0f;
    statsObject["performance_throttling_ratio"] = _performanceThrottlingRatio;

    statsObject["average_listeners_per_frame"] = (float) _sumListeners / (float) _numStatFrames;

    if (_sumListeners > 0) {
        statsObject["average_mixes_per_listener"] = (float) _sumMixes / (float) _sumListeners;
    } else {
        statsObject["average_mixes_per_listener"] = 0.0;
    }

    _sumListeners = 0;
    _sumMixes = 0;
    _numStatFrames = 0;

    QJsonObject readPendingDatagramStats;

    QJsonObject rpdCallsStats;
    rpdCallsStats["calls_per_sec_avg_30s"] = _readPendingCallsPerSecondStats.getWindowAverage();
    rpdCallsStats["calls_last_sec"] = _readPendingCallsPerSecondStats.getLastCompleteIntervalStats().getSum() + 0.5;

    readPendingDatagramStats["calls"] = rpdCallsStats;

    QJsonObject packetsPerCallStats;
    packetsPerCallStats["avg_30s"] = _datagramsReadPerCallStats.getWindowAverage();
    packetsPerCallStats["avg_1s"] = _datagramsReadPerCallStats.getLastCompleteIntervalStats().getAverage();

    readPendingDatagramStats["packets_per_call"] = packetsPerCallStats;

    QJsonObject packetsTimePerCallStats;
    packetsTimePerCallStats["usecs_per_call_avg_30s"] = _timeSpentPerCallStats.getWindowAverage();
    packetsTimePerCallStats["usecs_per_call_avg_1s"] = _timeSpentPerCallStats.getLastCompleteIntervalStats().getAverage();
    packetsTimePerCallStats["prct_time_in_call_30s"] =
        _timeSpentPerCallStats.getWindowSum() / (READ_DATAGRAMS_STATS_WINDOW_SECONDS * USECS_PER_SECOND) * 100.0;
    packetsTimePerCallStats["prct_time_in_call_1s"] =
        _timeSpentPerCallStats.getLastCompleteIntervalStats().getSum() / USECS_PER_SECOND * 100.0;

    readPendingDatagramStats["packets_time_per_call"] = packetsTimePerCallStats;

    QJsonObject hashMatchTimePerCallStats;
    hashMatchTimePerCallStats["usecs_per_hashmatch_avg_30s"] = _timeSpentPerHashMatchCallStats.getWindowAverage();
    hashMatchTimePerCallStats["usecs_per_hashmatch_avg_1s"]
        = _timeSpentPerHashMatchCallStats.getLastCompleteIntervalStats().getAverage();
    hashMatchTimePerCallStats["prct_time_in_hashmatch_30s"]
        = _timeSpentPerHashMatchCallStats.getWindowSum() / (READ_DATAGRAMS_STATS_WINDOW_SECONDS*USECS_PER_SECOND) * 100.0;
    hashMatchTimePerCallStats["prct_time_in_hashmatch_1s"]
        = _timeSpentPerHashMatchCallStats.getLastCompleteIntervalStats().getSum() / USECS_PER_SECOND * 100.0;
    readPendingDatagramStats["hashmatch_time_per_call"] = hashMatchTimePerCallStats;

    statsObject["read_pending_datagrams"] = readPendingDatagramStats;

    // add stats for each listener
    auto nodeList = DependencyManager::get<NodeList>();
    QJsonObject listenerStats;

    nodeList->eachNode([&](const SharedNodePointer& node) {
        AudioMixerClientData* clientData = static_cast<AudioMixerClientData*>(node->getLinkedData());
        if (clientData) {
            QJsonObject nodeStats;
            QString uuidString = uuidStringWithoutCurlyBraces(node->getUUID());

            nodeStats["outbound_kbps"] = node->getOutboundBandwidth();
            nodeStats[USERNAME_UUID_REPLACEMENT_STATS_KEY] = uuidString;

            nodeStats["jitter"] = clientData->getAudioStreamStats();

            listenerStats[uuidString] = nodeStats;
        }
    });

    // add the listeners object to the root object
    statsObject["listeners"] = listenerStats;

    // send off the stats packets
    ThreadedAssignment::addPacketStatsAndSendStatsPacket(statsObject);
}
Example #13
void NodeBounds::draw() {
    if (!_showEntityNodes) {
        _overlayText[0] = '\0';
        return;
    }

    NodeToJurisdictionMap& entityServerJurisdictions = Application::getInstance()->getEntityServerJurisdictions();
    NodeToJurisdictionMap* serverJurisdictions;

    // Compute ray to find selected nodes later on.  We can't use the pre-computed ray in Application because it centers
    // itself after the cursor disappears.
    Application* application = Application::getInstance();
    PickRay pickRay = application->getCamera()->computePickRay(application->getTrueMouseX(),
                                                               application->getTrueMouseY());

    // Variables to keep track of the selected node and properties to draw the cube later if needed
    Node* selectedNode = NULL;
    float selectedDistance = FLT_MAX;
    bool selectedIsInside = true;
    glm::vec3 selectedCenter;
    float selectedScale = 0;

    auto nodeList = DependencyManager::get<NodeList>();
    nodeList->eachNode([&](const SharedNodePointer& node){
        NodeType_t nodeType = node->getType();
        
        if (nodeType == NodeType::EntityServer && _showEntityNodes) {
            serverJurisdictions = &entityServerJurisdictions;
        } else {
            return;
        }
        
        QUuid nodeUUID = node->getUUID();
        serverJurisdictions->lockForRead();
        if (serverJurisdictions->find(nodeUUID) != serverJurisdictions->end()) {
            const JurisdictionMap& map = (*serverJurisdictions)[nodeUUID];
            
            unsigned char* rootCode = map.getRootOctalCode();
            
            if (rootCode) {
                VoxelPositionSize rootDetails;
                voxelDetailsForCode(rootCode, rootDetails);
                serverJurisdictions->unlock();
                glm::vec3 location(rootDetails.x, rootDetails.y, rootDetails.z);
                
                AACube serverBounds(location, rootDetails.s);
                
                glm::vec3 center = serverBounds.getVertex(BOTTOM_RIGHT_NEAR)
                + ((serverBounds.getVertex(TOP_LEFT_FAR) - serverBounds.getVertex(BOTTOM_RIGHT_NEAR)) / 2.0f);
                
                const float ENTITY_NODE_SCALE = 0.99f;
                
                float scaleFactor = rootDetails.s;
                
                // Scale by 0.92 - 1.00 depending on the scale of the node.  This allows smaller nodes to scale in
                // a bit and not overlap larger nodes.
                scaleFactor *= 0.92f + (rootDetails.s * 0.08f);
                
                // Scale different node types slightly differently because it's common for them to overlap.
                if (nodeType == NodeType::EntityServer) {
                    scaleFactor *= ENTITY_NODE_SCALE;
                }
                
                float red, green, blue;
                getColorForNodeType(nodeType, red, green, blue);
                drawNodeBorder(center, scaleFactor, red, green, blue);
                
                float distance;
                BoxFace face;
                
                bool inside = serverBounds.contains(pickRay.origin);
                bool colliding = serverBounds.findRayIntersection(pickRay.origin, pickRay.direction, distance, face);

                // If the camera is inside a node it will be "selected" if you don't have your cursor over another node
                // that you aren't inside.
                if (colliding && (!selectedNode || (!inside && (distance < selectedDistance || selectedIsInside)))) {
                    selectedNode = node.data();
                    selectedDistance = distance;
                    selectedIsInside = inside;
                    selectedCenter = center;
                    selectedScale = scaleFactor;
                }
            } else {
                serverJurisdictions->unlock();
            }
        } else {
            serverJurisdictions->unlock();
        }
    });

    if (selectedNode) {
        glPushMatrix();

        glTranslatef(selectedCenter.x, selectedCenter.y, selectedCenter.z);
        glScalef(selectedScale, selectedScale, selectedScale);

        float red, green, blue;
        getColorForNodeType(selectedNode->getType(), red, green, blue);

        DependencyManager::get<GeometryCache>()->renderSolidCube(1.0f, glm::vec4(red, green, blue, 0.2f));

        glPopMatrix();

        HifiSockAddr addr = selectedNode->getPublicSocket();
        QString overlay = QString("%1:%2  %3ms")
            .arg(addr.getAddress().toString())
            .arg(addr.getPort())
            .arg(selectedNode->getPingMs())
            .left(MAX_OVERLAY_TEXT_LENGTH);

        // Ideally we'd just use a QString, but I ran into weird blinking issues using
        // constData() directly, as if the data was being overwritten.
        strcpy(_overlayText, overlay.toLocal8Bit().constData());
    } else {
        _overlayText[0] = '\0';
    }
}
Example #14
void AudioMixer::broadcastMixes() {
    auto nodeList = DependencyManager::get<NodeList>();

    auto nextFrameTimestamp = p_high_resolution_clock::now();
    auto timeToSleep = std::chrono::microseconds(0);

    const int TRAILING_AVERAGE_FRAMES = 100;
    int framesSinceCutoffEvent = TRAILING_AVERAGE_FRAMES;

    int currentFrame { 1 };
    int numFramesPerSecond { (int) ceil(AudioConstants::NETWORK_FRAMES_PER_SEC) };

    while (!_isFinished) {
        const float STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.10f;
        const float BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.20f;

        const float RATIO_BACK_OFF = 0.02f;

        const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES;
        const float PREVIOUS_FRAMES_RATIO = 1.0f - CURRENT_FRAME_RATIO;

        if (timeToSleep.count() < 0) {
            timeToSleep = std::chrono::microseconds(0);
        }

        _trailingSleepRatio = (PREVIOUS_FRAMES_RATIO * _trailingSleepRatio)
            + (timeToSleep.count() * CURRENT_FRAME_RATIO / (float) AudioConstants::NETWORK_FRAME_USECS);

        float lastCutoffRatio = _performanceThrottlingRatio;
        bool hasRatioChanged = false;

        if (framesSinceCutoffEvent >= TRAILING_AVERAGE_FRAMES) {
            if (_trailingSleepRatio <= STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD) {
                // we're struggling - change our min required loudness to reduce some load
                _performanceThrottlingRatio = _performanceThrottlingRatio + (0.5f * (1.0f - _performanceThrottlingRatio));

                qDebug() << "Mixer is struggling, sleeping" << _trailingSleepRatio * 100 << "% of frame time. Old cutoff was"
                    << lastCutoffRatio << "and is now" << _performanceThrottlingRatio;
                hasRatioChanged = true;
            } else if (_trailingSleepRatio >= BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD && _performanceThrottlingRatio != 0) {
                // we've recovered and can back off the required loudness
                _performanceThrottlingRatio = _performanceThrottlingRatio - RATIO_BACK_OFF;

                if (_performanceThrottlingRatio < 0) {
                    _performanceThrottlingRatio = 0;
                }

                qDebug() << "Mixer is recovering, sleeping" << _trailingSleepRatio * 100 << "% of frame time. Old cutoff was"
                    << lastCutoffRatio << "and is now" << _performanceThrottlingRatio;
                hasRatioChanged = true;
            }

            if (hasRatioChanged) {
                // set our min audibility threshold from the new ratio
                _minAudibilityThreshold = LOUDNESS_TO_DISTANCE_RATIO / (2.0f * (1.0f - _performanceThrottlingRatio));
                qDebug() << "Minimum audability required to be mixed is now" << _minAudibilityThreshold;

                framesSinceCutoffEvent = 0;
            }
        }

        if (!hasRatioChanged) {
            ++framesSinceCutoffEvent;
        }

        nodeList->eachNode([&](const SharedNodePointer& node) {

            if (node->getLinkedData()) {
                AudioMixerClientData* nodeData = (AudioMixerClientData*)node->getLinkedData();

                // this function will attempt to pop a frame from each audio stream.
                // a pointer to the popped data is stored as a member in InboundAudioStream.
                // That's how the popped audio data will be read for mixing (but only if the pop was successful)
                nodeData->checkBuffersBeforeFrameSend();

                // if the stream should be muted, send mute packet
                if (nodeData->getAvatarAudioStream()
                    && shouldMute(nodeData->getAvatarAudioStream()->getQuietestFrameLoudness())) {
                    auto mutePacket = NLPacket::create(PacketType::NoisyMute, 0);
                    nodeList->sendPacket(std::move(mutePacket), *node);
                }

                if (node->getType() == NodeType::Agent && node->getActiveSocket()
                    && nodeData->getAvatarAudioStream()) {

                    bool mixHasAudio = prepareMixForListeningNode(node.data());

                    std::unique_ptr<NLPacket> mixPacket;

                    if (mixHasAudio) {
                        int mixPacketBytes = sizeof(quint16) + AudioConstants::MAX_CODEC_NAME_LENGTH_ON_WIRE 
                                                             + AudioConstants::NETWORK_FRAME_BYTES_STEREO;
                        mixPacket = NLPacket::create(PacketType::MixedAudio, mixPacketBytes);

                        // pack sequence number
                        quint16 sequence = nodeData->getOutgoingSequenceNumber();
                        mixPacket->writePrimitive(sequence);

                        // write the codec
                        QString codecInPacket = nodeData->getCodecName();
                        mixPacket->writeString(codecInPacket);

                        QByteArray decodedBuffer(reinterpret_cast<char*>(_clampedSamples), AudioConstants::NETWORK_FRAME_BYTES_STEREO);
                        QByteArray encodedBuffer;
                        nodeData->encode(decodedBuffer, encodedBuffer);

                        // pack mixed audio samples
                        mixPacket->write(encodedBuffer.constData(), encodedBuffer.size());
                    } else {
                        int silentPacketBytes = sizeof(quint16) + sizeof(quint16) + AudioConstants::MAX_CODEC_NAME_LENGTH_ON_WIRE;
                        mixPacket = NLPacket::create(PacketType::SilentAudioFrame, silentPacketBytes);

                        // pack sequence number
                        quint16 sequence = nodeData->getOutgoingSequenceNumber();
                        mixPacket->writePrimitive(sequence);

                        // write the codec
                        QString codecInPacket = nodeData->getCodecName();
                        mixPacket->writeString(codecInPacket);

                        // pack number of silent audio samples
                        quint16 numSilentSamples = AudioConstants::NETWORK_FRAME_SAMPLES_STEREO;
                        mixPacket->writePrimitive(numSilentSamples);
                    }

                    // Send audio environment
                    sendAudioEnvironmentPacket(node);

                    // send mixed audio packet
                    nodeList->sendPacket(std::move(mixPacket), *node);
                    nodeData->incrementOutgoingMixedAudioSequenceNumber();

                    // send an audio stream stats packet to the client approximately every second
                    ++currentFrame;
                    currentFrame %= numFramesPerSecond;

                    if (nodeData->shouldSendStats(currentFrame)) {
                        nodeData->sendAudioStreamStatsPackets(node);
                    }

                    ++_sumListeners;
                }
            }
        });

        ++_numStatFrames;

        // since we're a while loop we need to help Qt's event processing
        QCoreApplication::processEvents();

        if (_isFinished) {
            // at this point the audio-mixer is done
            // check if we have a deferred delete event to process (which we should once finished)
            QCoreApplication::sendPostedEvents(this, QEvent::DeferredDelete);
            break;
        }

        // push the next frame timestamp to when we should send the next
        nextFrameTimestamp += std::chrono::microseconds(AudioConstants::NETWORK_FRAME_USECS);

        // sleep as long as we need until next frame, if we can
        auto now = p_high_resolution_clock::now();
        timeToSleep = std::chrono::duration_cast<std::chrono::microseconds>(nextFrameTimestamp - now);

        std::this_thread::sleep_for(timeToSleep);
    }
}
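Examples #11 and #14 are two generations of the same mixer loop: the older one paces itself with QElapsedTimer and usleep(), the newer one with std::chrono and std::this_thread::sleep_for. A self-contained sketch of the chrono-based pacing, with a hypothetical 10 ms frame interval standing in for AudioConstants::NETWORK_FRAME_USECS:

#include <chrono>
#include <thread>

void runPacedLoop(const bool& finished) {
    using namespace std::chrono;
    const microseconds FRAME_INTERVAL{10 * 1000}; // hypothetical frame length

    auto nextFrameTimestamp = steady_clock::now();

    while (!finished) {
        // ... one frame of mixing work goes here ...

        // push the next frame timestamp forward by one interval, then sleep
        // away whatever part of the interval is left (if any)
        nextFrameTimestamp += FRAME_INTERVAL;
        auto timeToSleep = duration_cast<microseconds>(nextFrameTimestamp - steady_clock::now());
        if (timeToSleep.count() > 0) {
            std::this_thread::sleep_for(timeToSleep);
        }
    }
}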