void AudioMixer::run() {
    commonInit(AUDIO_MIXER_LOGGING_TARGET_NAME, NodeType::AudioMixer);

    NodeList* nodeList = NodeList::getInstance();
    nodeList->addNodeTypeToInterestSet(NodeType::Agent);

    nodeList->linkedDataCreateCallback = attachNewBufferToNode;

    int nextFrame = 0;
    timeval startTime;
    gettimeofday(&startTime, NULL);

    int numBytesPacketHeader = numBytesForPacketHeaderGivenPacketType(PacketTypeMixedAudio);

    // note: Visual Studio 2010 doesn't support variable sized local arrays
#ifdef _WIN32
    unsigned char clientPacket[MAX_PACKET_SIZE];
#else
    unsigned char clientPacket[NETWORK_BUFFER_LENGTH_BYTES_STEREO + numBytesPacketHeader];
#endif
    populatePacketHeader(reinterpret_cast<char*>(clientPacket), PacketTypeMixedAudio);

    while (!_isFinished) {
        QCoreApplication::processEvents();

        if (_isFinished) {
            break;
        }

        foreach (const SharedNodePointer& node, nodeList->getNodeHash()) {
            if (node->getLinkedData()) {
                ((AudioMixerClientData*) node->getLinkedData())->checkBuffersBeforeFrameSend(JITTER_BUFFER_SAMPLES);
            }
        }

        foreach (const SharedNodePointer& node, nodeList->getNodeHash()) {
            if (node->getType() == NodeType::Agent && node->getActiveSocket() && node->getLinkedData()
                && ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioRingBuffer()) {
                prepareMixForListeningNode(node.data());

                memcpy(clientPacket + numBytesPacketHeader, _clientSamples, sizeof(_clientSamples));
                nodeList->getNodeSocket().writeDatagram((char*) clientPacket, sizeof(clientPacket),
                                                        node->getActiveSocket()->getAddress(),
                                                        node->getActiveSocket()->getPort());
            }
        }

        // push forward the next output pointers for any audio buffers we used
        foreach (const SharedNodePointer& node, nodeList->getNodeHash()) {
            if (node->getLinkedData()) {
                ((AudioMixerClientData*) node->getLinkedData())->pushBuffersAfterFrameSend();
            }
        }

        int usecToSleep = usecTimestamp(&startTime) + (++nextFrame * BUFFER_SEND_INTERVAL_USECS) - usecTimestampNow();

        if (usecToSleep > 0) {
            usleep(usecToSleep);
        } else {
            qDebug() << "AudioMixer loop took" << -usecToSleep << "of extra time. Not sleeping.";
        }
    }
}
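
The sleep computation in this version anchors every frame to the loop's start time (start + N * interval) rather than to the previous wake-up, so timing error from a slow frame does not accumulate across frames. The following is a minimal, self-contained sketch of that pacing idea using std::chrono; the names (pacedLoop, frameInterval, doFrameWork) are illustrative and not part of the mixer source.

// Sketch (not from the mixer source): drift-free frame pacing.
// Each frame's target wake time is start + N * interval, so a late frame is
// followed by a shorter sleep instead of pushing every later frame back.
#include <chrono>
#include <thread>

void pacedLoop(bool& finished) {
    using namespace std::chrono;
    const auto frameInterval = microseconds(10000);   // illustrative 10 ms frame
    const auto start = steady_clock::now();
    int nextFrame = 0;

    while (!finished) {
        // doFrameWork();  // hypothetical per-frame work

        auto targetWake = start + (++nextFrame) * frameInterval;
        auto toSleep = targetWake - steady_clock::now();
        if (toSleep > microseconds(0)) {
            std::this_thread::sleep_for(toSleep);
        }
        // a negative toSleep means the frame overran; skip sleeping and catch up
    }
}
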
void AudioMixer::run() {
    ThreadedAssignment::commonInit(AUDIO_MIXER_LOGGING_TARGET_NAME, NodeType::AudioMixer);

    auto nodeList = DependencyManager::get<NodeList>();
    nodeList->addNodeTypeToInterestSet(NodeType::Agent);

    nodeList->linkedDataCreateCallback = [](Node* node) {
        node->setLinkedData(new AudioMixerClientData());
    };

    // wait until we have the domain-server settings, otherwise we bail
    DomainHandler& domainHandler = nodeList->getDomainHandler();

    qDebug() << "Waiting for domain settings from domain-server.";

    // block until we get the settingsRequestComplete signal
    QEventLoop loop;
    connect(&domainHandler, &DomainHandler::settingsReceived, &loop, &QEventLoop::quit);
    connect(&domainHandler, &DomainHandler::settingsReceiveFail, &loop, &QEventLoop::quit);
    domainHandler.requestDomainSettings();
    loop.exec();

    if (domainHandler.getSettingsObject().isEmpty()) {
        qDebug() << "Failed to retrieve settings object from domain-server. Bailing on assignment.";
        setFinished(true);
        return;
    }

    const QJsonObject& settingsObject = domainHandler.getSettingsObject();

    // check the settings object to see if we have anything we can parse out
    parseSettingsObject(settingsObject);

    int nextFrame = 0;
    QElapsedTimer timer;
    timer.start();

    int usecToSleep = AudioConstants::NETWORK_FRAME_USECS;

    const int TRAILING_AVERAGE_FRAMES = 100;
    int framesSinceCutoffEvent = TRAILING_AVERAGE_FRAMES;

    while (!_isFinished) {
        const float STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.10f;
        const float BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.20f;

        const float RATIO_BACK_OFF = 0.02f;

        const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES;
        const float PREVIOUS_FRAMES_RATIO = 1.0f - CURRENT_FRAME_RATIO;

        if (usecToSleep < 0) {
            usecToSleep = 0;
        }

        _trailingSleepRatio = (PREVIOUS_FRAMES_RATIO * _trailingSleepRatio)
            + (usecToSleep * CURRENT_FRAME_RATIO / (float) AudioConstants::NETWORK_FRAME_USECS);

        float lastCutoffRatio = _performanceThrottlingRatio;
        bool hasRatioChanged = false;

        if (framesSinceCutoffEvent >= TRAILING_AVERAGE_FRAMES) {
            if (_trailingSleepRatio <= STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD) {
                // we're struggling - change our min required loudness to reduce some load
                _performanceThrottlingRatio = _performanceThrottlingRatio + (0.5f * (1.0f - _performanceThrottlingRatio));

                qDebug() << "Mixer is struggling, sleeping" << _trailingSleepRatio * 100 << "% of frame time. Old cutoff was"
                    << lastCutoffRatio << "and is now" << _performanceThrottlingRatio;
                hasRatioChanged = true;
            } else if (_trailingSleepRatio >= BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD && _performanceThrottlingRatio != 0) {
                // we've recovered and can back off the required loudness
                _performanceThrottlingRatio = _performanceThrottlingRatio - RATIO_BACK_OFF;

                if (_performanceThrottlingRatio < 0) {
                    _performanceThrottlingRatio = 0;
                }

                qDebug() << "Mixer is recovering, sleeping" << _trailingSleepRatio * 100 << "% of frame time. Old cutoff was"
                    << lastCutoffRatio << "and is now" << _performanceThrottlingRatio;
                hasRatioChanged = true;
            }

            if (hasRatioChanged) {
                // set our min audibility threshold from the new ratio
                _minAudibilityThreshold = LOUDNESS_TO_DISTANCE_RATIO / (2.0f * (1.0f - _performanceThrottlingRatio));
                qDebug() << "Minimum audibility required to be mixed is now" << _minAudibilityThreshold;

                framesSinceCutoffEvent = 0;
            }
        }

        if (!hasRatioChanged) {
            ++framesSinceCutoffEvent;
        }

        quint64 now = usecTimestampNow();
        if (now - _lastPerSecondCallbackTime > USECS_PER_SECOND) {
            perSecondActions();
            _lastPerSecondCallbackTime = now;
        }

        nodeList->eachNode([&](const SharedNodePointer& node) {
            if (node->getLinkedData()) {
                AudioMixerClientData* nodeData = (AudioMixerClientData*) node->getLinkedData();

                // this function will attempt to pop a frame from each audio stream.
                // a pointer to the popped data is stored as a member in InboundAudioStream.
                // That's how the popped audio data will be read for mixing (but only if the pop was successful)
                nodeData->checkBuffersBeforeFrameSend();

                // if the stream should be muted, send mute packet
                if (nodeData->getAvatarAudioStream()
                    && shouldMute(nodeData->getAvatarAudioStream()->getQuietestFrameLoudness())) {
                    auto mutePacket = NLPacket::create(PacketType::NoisyMute, 0);
                    nodeList->sendPacket(std::move(mutePacket), *node);
                }

                if (node->getType() == NodeType::Agent && node->getActiveSocket()
                    && nodeData->getAvatarAudioStream()) {

                    int streamsMixed = prepareMixForListeningNode(node.data());

                    std::unique_ptr<NLPacket> mixPacket;

                    if (streamsMixed > 0) {
                        int mixPacketBytes = sizeof(quint16) + AudioConstants::NETWORK_FRAME_BYTES_STEREO;
                        mixPacket = NLPacket::create(PacketType::MixedAudio, mixPacketBytes);

                        // pack sequence number
                        quint16 sequence = nodeData->getOutgoingSequenceNumber();
                        mixPacket->writePrimitive(sequence);

                        // pack mixed audio samples
                        mixPacket->write(reinterpret_cast<char*>(_mixSamples), AudioConstants::NETWORK_FRAME_BYTES_STEREO);
                    } else {
                        int silentPacketBytes = sizeof(quint16) + sizeof(quint16);
                        mixPacket = NLPacket::create(PacketType::SilentAudioFrame, silentPacketBytes);

                        // pack sequence number
                        quint16 sequence = nodeData->getOutgoingSequenceNumber();
                        mixPacket->writePrimitive(sequence);

                        // pack number of silent audio samples
                        quint16 numSilentSamples = AudioConstants::NETWORK_FRAME_SAMPLES_STEREO;
                        mixPacket->writePrimitive(numSilentSamples);
                    }

                    // Send audio environment
                    sendAudioEnvironmentPacket(node);

                    // send mixed audio packet
                    nodeList->sendPacket(std::move(mixPacket), *node);
                    nodeData->incrementOutgoingMixedAudioSequenceNumber();

                    // send an audio stream stats packet if it's time
                    if (_sendAudioStreamStats) {
                        nodeData->sendAudioStreamStatsPackets(node);
                        _sendAudioStreamStats = false;
                    }

                    ++_sumListeners;
                }
            }
        });

        ++_numStatFrames;

        // since we're a while loop we need to help Qt's event processing
        QCoreApplication::processEvents();

        if (_isFinished) {
            // at this point the audio-mixer is done
            // check if we have a deferred delete event to process (which we should once finished)
            QCoreApplication::sendPostedEvents(this, QEvent::DeferredDelete);
            break;
        }

        usecToSleep = (++nextFrame * AudioConstants::NETWORK_FRAME_USECS) - timer.nsecsElapsed() / 1000; // ns to us

        if (usecToSleep > 0) {
            usleep(usecToSleep);
        }
    }
}
void AudioMixer::run() {
    commonInit(AUDIO_MIXER_LOGGING_TARGET_NAME, NodeType::AudioMixer);

    NodeList* nodeList = NodeList::getInstance();
    nodeList->addNodeTypeToInterestSet(NodeType::Agent);

    nodeList->linkedDataCreateCallback = attachNewBufferToNode;

    int nextFrame = 0;
    timeval startTime;
    gettimeofday(&startTime, NULL);

    char* clientMixBuffer = new char[NETWORK_BUFFER_LENGTH_BYTES_STEREO
                                     + numBytesForPacketHeaderGivenPacketType(PacketTypeMixedAudio)];

    while (!_isFinished) {
        foreach (const SharedNodePointer& node, nodeList->getNodeHash()) {
            if (node->getLinkedData()) {
                ((AudioMixerClientData*) node->getLinkedData())->checkBuffersBeforeFrameSend(JITTER_BUFFER_SAMPLES);
            }
        }

        foreach (const SharedNodePointer& node, nodeList->getNodeHash()) {
            if (node->getType() == NodeType::Agent && node->getActiveSocket() && node->getLinkedData()
                && ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioRingBuffer()) {
                prepareMixForListeningNode(node.data());

                int numBytesPacketHeader = populatePacketHeader(clientMixBuffer, PacketTypeMixedAudio);

                memcpy(clientMixBuffer + numBytesPacketHeader, _clientSamples, NETWORK_BUFFER_LENGTH_BYTES_STEREO);
                nodeList->writeDatagram(clientMixBuffer, NETWORK_BUFFER_LENGTH_BYTES_STEREO + numBytesPacketHeader, node);
            }
        }

        // push forward the next output pointers for any audio buffers we used
        foreach (const SharedNodePointer& node, nodeList->getNodeHash()) {
            if (node->getLinkedData()) {
                ((AudioMixerClientData*) node->getLinkedData())->pushBuffersAfterFrameSend();
            }
        }

        QCoreApplication::processEvents();

        if (_isFinished) {
            break;
        }

        int usecToSleep = usecTimestamp(&startTime) + (++nextFrame * BUFFER_SEND_INTERVAL_USECS) - usecTimestampNow();

        if (usecToSleep > 0) {
            usleep(usecToSleep);
        } else {
            qDebug() << "AudioMixer loop took" << -usecToSleep << "of extra time. Not sleeping.";
        }
    }

    delete[] clientMixBuffer;
}
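
In this variant the outgoing MixedAudio datagram is simply a packet header followed by one stereo network frame of PCM samples, assembled into a single heap buffer that is reused every frame. A small sketch of that header-plus-payload assembly follows; the constants and the assembleMixDatagram helper are illustrative stand-ins, not the mixer's actual helpers or sizes.

// Sketch (not from the mixer source): assembling a header-plus-payload datagram
// into a reusable buffer, in the spirit of clientMixBuffer above.
#include <cstddef>
#include <cstdint>
#include <cstring>

// Illustrative sizes; the real values come from the protocol headers.
constexpr std::size_t kHeaderBytes = 24;
constexpr std::size_t kStereoFrameBytes = 1920;  // hypothetical stereo frame size

std::size_t assembleMixDatagram(char* buffer, const char* header, const std::int16_t* samples) {
    // copy the packet header first, then the interleaved stereo samples right after it
    std::memcpy(buffer, header, kHeaderBytes);
    std::memcpy(buffer + kHeaderBytes, samples, kStereoFrameBytes);
    return kHeaderBytes + kStereoFrameBytes;  // total bytes to hand to the socket
}
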
void AudioMixer::broadcastMixes() {
    auto nodeList = DependencyManager::get<NodeList>();

    auto nextFrameTimestamp = p_high_resolution_clock::now();
    auto timeToSleep = std::chrono::microseconds(0);

    const int TRAILING_AVERAGE_FRAMES = 100;
    int framesSinceCutoffEvent = TRAILING_AVERAGE_FRAMES;

    int currentFrame { 1 };
    int numFramesPerSecond { (int) ceil(AudioConstants::NETWORK_FRAMES_PER_SEC) };

    while (!_isFinished) {
        const float STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.10f;
        const float BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.20f;

        const float RATIO_BACK_OFF = 0.02f;

        const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES;
        const float PREVIOUS_FRAMES_RATIO = 1.0f - CURRENT_FRAME_RATIO;

        if (timeToSleep.count() < 0) {
            timeToSleep = std::chrono::microseconds(0);
        }

        _trailingSleepRatio = (PREVIOUS_FRAMES_RATIO * _trailingSleepRatio)
            + (timeToSleep.count() * CURRENT_FRAME_RATIO / (float) AudioConstants::NETWORK_FRAME_USECS);

        float lastCutoffRatio = _performanceThrottlingRatio;
        bool hasRatioChanged = false;

        if (framesSinceCutoffEvent >= TRAILING_AVERAGE_FRAMES) {
            if (_trailingSleepRatio <= STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD) {
                // we're struggling - change our min required loudness to reduce some load
                _performanceThrottlingRatio = _performanceThrottlingRatio + (0.5f * (1.0f - _performanceThrottlingRatio));

                qDebug() << "Mixer is struggling, sleeping" << _trailingSleepRatio * 100 << "% of frame time. Old cutoff was"
                    << lastCutoffRatio << "and is now" << _performanceThrottlingRatio;
                hasRatioChanged = true;
            } else if (_trailingSleepRatio >= BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD && _performanceThrottlingRatio != 0) {
                // we've recovered and can back off the required loudness
                _performanceThrottlingRatio = _performanceThrottlingRatio - RATIO_BACK_OFF;

                if (_performanceThrottlingRatio < 0) {
                    _performanceThrottlingRatio = 0;
                }

                qDebug() << "Mixer is recovering, sleeping" << _trailingSleepRatio * 100 << "% of frame time. Old cutoff was"
                    << lastCutoffRatio << "and is now" << _performanceThrottlingRatio;
                hasRatioChanged = true;
            }

            if (hasRatioChanged) {
                // set our min audibility threshold from the new ratio
                _minAudibilityThreshold = LOUDNESS_TO_DISTANCE_RATIO / (2.0f * (1.0f - _performanceThrottlingRatio));
                qDebug() << "Minimum audibility required to be mixed is now" << _minAudibilityThreshold;

                framesSinceCutoffEvent = 0;
            }
        }

        if (!hasRatioChanged) {
            ++framesSinceCutoffEvent;
        }

        nodeList->eachNode([&](const SharedNodePointer& node) {
            if (node->getLinkedData()) {
                AudioMixerClientData* nodeData = (AudioMixerClientData*) node->getLinkedData();

                // this function will attempt to pop a frame from each audio stream.
                // a pointer to the popped data is stored as a member in InboundAudioStream.
                // That's how the popped audio data will be read for mixing (but only if the pop was successful)
                nodeData->checkBuffersBeforeFrameSend();

                // if the stream should be muted, send mute packet
                if (nodeData->getAvatarAudioStream()
                    && shouldMute(nodeData->getAvatarAudioStream()->getQuietestFrameLoudness())) {
                    auto mutePacket = NLPacket::create(PacketType::NoisyMute, 0);
                    nodeList->sendPacket(std::move(mutePacket), *node);
                }

                if (node->getType() == NodeType::Agent && node->getActiveSocket()
                    && nodeData->getAvatarAudioStream()) {

                    bool mixHasAudio = prepareMixForListeningNode(node.data());

                    std::unique_ptr<NLPacket> mixPacket;

                    if (mixHasAudio) {
                        int mixPacketBytes = sizeof(quint16) + AudioConstants::MAX_CODEC_NAME_LENGTH_ON_WIRE
                            + AudioConstants::NETWORK_FRAME_BYTES_STEREO;
                        mixPacket = NLPacket::create(PacketType::MixedAudio, mixPacketBytes);

                        // pack sequence number
                        quint16 sequence = nodeData->getOutgoingSequenceNumber();
                        mixPacket->writePrimitive(sequence);

                        // write the codec
                        QString codecInPacket = nodeData->getCodecName();
                        mixPacket->writeString(codecInPacket);

                        QByteArray decodedBuffer(reinterpret_cast<char*>(_clampedSamples), AudioConstants::NETWORK_FRAME_BYTES_STEREO);
                        QByteArray encodedBuffer;
                        nodeData->encode(decodedBuffer, encodedBuffer);

                        // pack mixed audio samples
                        mixPacket->write(encodedBuffer.constData(), encodedBuffer.size());
                    } else {
                        int silentPacketBytes = sizeof(quint16) + sizeof(quint16) + AudioConstants::MAX_CODEC_NAME_LENGTH_ON_WIRE;
                        mixPacket = NLPacket::create(PacketType::SilentAudioFrame, silentPacketBytes);

                        // pack sequence number
                        quint16 sequence = nodeData->getOutgoingSequenceNumber();
                        mixPacket->writePrimitive(sequence);

                        // write the codec
                        QString codecInPacket = nodeData->getCodecName();
                        mixPacket->writeString(codecInPacket);

                        // pack number of silent audio samples
                        quint16 numSilentSamples = AudioConstants::NETWORK_FRAME_SAMPLES_STEREO;
                        mixPacket->writePrimitive(numSilentSamples);
                    }

                    // Send audio environment
                    sendAudioEnvironmentPacket(node);

                    // send mixed audio packet
                    nodeList->sendPacket(std::move(mixPacket), *node);
                    nodeData->incrementOutgoingMixedAudioSequenceNumber();

                    // send an audio stream stats packet to the client approximately every second
                    ++currentFrame;
                    currentFrame %= numFramesPerSecond;

                    if (nodeData->shouldSendStats(currentFrame)) {
                        nodeData->sendAudioStreamStatsPackets(node);
                    }

                    ++_sumListeners;
                }
            }
        });

        ++_numStatFrames;

        // since we're a while loop we need to help Qt's event processing
        QCoreApplication::processEvents();

        if (_isFinished) {
            // at this point the audio-mixer is done
            // check if we have a deferred delete event to process (which we should once finished)
            QCoreApplication::sendPostedEvents(this, QEvent::DeferredDelete);
            break;
        }

        // push the next frame timestamp forward to when we should send the next frame
        nextFrameTimestamp += std::chrono::microseconds(AudioConstants::NETWORK_FRAME_USECS);

        // sleep as long as we need until next frame, if we can
        auto now = p_high_resolution_clock::now();
        timeToSleep = std::chrono::duration_cast<std::chrono::microseconds>(nextFrameTimestamp - now);

        std::this_thread::sleep_for(timeToSleep);
    }
}
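
In this later version each per-client payload carries a sequence number and the negotiated codec name before the audio data, and a silent frame carries a sample count instead of samples. The sketch below only restates those two layouts and their capacity arithmetic from the expressions above; the constant values used here are illustrative stand-ins, not the real AudioConstants.

// Sketch (not from the mixer source): the two payload layouts built above.
//
// MixedAudio:
//   [quint16 sequence][codec name string][encoded stereo frame]
//   capacity = sizeof(quint16) + MAX_CODEC_NAME_LENGTH_ON_WIRE + NETWORK_FRAME_BYTES_STEREO
//
// SilentAudioFrame:
//   [quint16 sequence][codec name string][quint16 numSilentSamples]
//   capacity = sizeof(quint16) + sizeof(quint16) + MAX_CODEC_NAME_LENGTH_ON_WIRE
#include <cstddef>
#include <cstdint>

constexpr std::size_t kMaxCodecNameOnWire = 30;         // hypothetical wire budget for the codec string
constexpr std::size_t kNetworkFrameBytesStereo = 1920;  // hypothetical encoded-frame ceiling

constexpr std::size_t mixedAudioCapacity() {
    return sizeof(std::uint16_t) + kMaxCodecNameOnWire + kNetworkFrameBytesStereo;
}

constexpr std::size_t silentFrameCapacity() {
    return sizeof(std::uint16_t) + sizeof(std::uint16_t) + kMaxCodecNameOnWire;
}
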