inline void Audio::performIO(int16_t* inputLeft, int16_t* outputLeft, int16_t* outputRight) {
    NodeList* nodeList = NodeList::getInstance();
    Application* interface = Application::getInstance();
    Avatar* interfaceAvatar = interface->getAvatar();

    memset(outputLeft, 0, PACKET_LENGTH_BYTES_PER_CHANNEL);
    memset(outputRight, 0, PACKET_LENGTH_BYTES_PER_CHANNEL);

    // Add Procedural effects to input samples
    addProceduralSounds(inputLeft, outputLeft, outputRight, BUFFER_LENGTH_SAMPLES_PER_CHANNEL);

    if (nodeList && inputLeft) {
        // Measure the loudness of the signal from the microphone and store in audio object
        float loudness = 0;
        for (int i = 0; i < BUFFER_LENGTH_SAMPLES_PER_CHANNEL; i++) {
            loudness += abs(inputLeft[i]);
        }
        loudness /= BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
        _lastInputLoudness = loudness;

        // add input (@microphone) data to the scope
        _scope->addSamples(0, inputLeft, BUFFER_LENGTH_SAMPLES_PER_CHANNEL);

        Node* audioMixer = nodeList->soloNodeOfType(NODE_TYPE_AUDIO_MIXER);

        if (audioMixer) {
            audioMixer->lock();
            sockaddr_in audioSocket = *(sockaddr_in*) audioMixer->getActiveSocket();
            audioMixer->unlock();

            glm::vec3 headPosition = interfaceAvatar->getHeadJointPosition();
            glm::quat headOrientation = interfaceAvatar->getHead().getOrientation();

            int numBytesPacketHeader = numBytesForPacketHeader((unsigned char*) &PACKET_TYPE_MICROPHONE_AUDIO_NO_ECHO);
            int leadingBytes = numBytesPacketHeader + sizeof(headPosition) + sizeof(headOrientation);

            // we need the amount of bytes in the buffer + 1 for type
            // + 12 for 3 floats for position + float for bearing + 1 attenuation byte
            unsigned char dataPacket[MAX_PACKET_SIZE];

            PACKET_TYPE packetType = Menu::getInstance()->isOptionChecked(MenuOption::EchoAudio)
                ? PACKET_TYPE_MICROPHONE_AUDIO_WITH_ECHO
                : PACKET_TYPE_MICROPHONE_AUDIO_NO_ECHO;

            unsigned char* currentPacketPtr = dataPacket + populateTypeAndVersion(dataPacket, packetType);

            // pack Source Data
            uint16_t ownerID = NodeList::getInstance()->getOwnerID();
            memcpy(currentPacketPtr, &ownerID, sizeof(ownerID));
            currentPacketPtr += sizeof(ownerID);
            leadingBytes += sizeof(ownerID);

            // pack Listen Mode Data
            memcpy(currentPacketPtr, &_listenMode, sizeof(_listenMode));
            currentPacketPtr += sizeof(_listenMode);
            leadingBytes += sizeof(_listenMode);

            if (_listenMode == AudioRingBuffer::OMNI_DIRECTIONAL_POINT) {
                memcpy(currentPacketPtr, &_listenRadius, sizeof(_listenRadius));
                currentPacketPtr += sizeof(_listenRadius);
                leadingBytes += sizeof(_listenRadius);
            } else if (_listenMode == AudioRingBuffer::SELECTED_SOURCES) {
                int listenSourceCount = _listenSources.size();
                memcpy(currentPacketPtr, &listenSourceCount, sizeof(listenSourceCount));
                currentPacketPtr += sizeof(listenSourceCount);
                leadingBytes += sizeof(listenSourceCount);

                for (int i = 0; i < listenSourceCount; i++) {
                    memcpy(currentPacketPtr, &_listenSources[i], sizeof(_listenSources[i]));
                    currentPacketPtr += sizeof(_listenSources[i]);
                    leadingBytes += sizeof(_listenSources[i]);
                }
            }

            // memcpy the three float positions
            memcpy(currentPacketPtr, &headPosition, sizeof(headPosition));
            currentPacketPtr += sizeof(headPosition);

            // memcpy our orientation
            memcpy(currentPacketPtr, &headOrientation, sizeof(headOrientation));
            currentPacketPtr += sizeof(headOrientation);

            // copy the audio data to the last BUFFER_LENGTH_BYTES bytes of the data packet
            memcpy(currentPacketPtr, inputLeft, BUFFER_LENGTH_BYTES_PER_CHANNEL);

            nodeList->getNodeSocket()->send((sockaddr*) &audioSocket, dataPacket,
                                            BUFFER_LENGTH_BYTES_PER_CHANNEL + leadingBytes);

            interface->getBandwidthMeter()->outputStream(BandwidthMeter::AUDIO)
                .updateValue(BUFFER_LENGTH_BYTES_PER_CHANNEL + leadingBytes);
        }
    }

    AudioRingBuffer* ringBuffer = &_ringBuffer;

    // if there is anything in the ring buffer, decide what to do:
    if (ringBuffer->getEndOfLastWrite()) {
        if (!ringBuffer->isStarted()
            && ringBuffer->diffLastWriteNextOutput() < (PACKET_LENGTH_SAMPLES
                                                        + _jitterBufferSamples * (ringBuffer->isStereo() ? 2 : 1))) {
            //
            // If not enough audio has arrived to start playback, keep waiting
            //
#ifdef SHOW_AUDIO_DEBUG
            qDebug("%i,%i,%i,%i\n", _packetsReceivedThisPlayback, ringBuffer->diffLastWriteNextOutput(),
                   PACKET_LENGTH_SAMPLES, _jitterBufferSamples);
#endif
        } else if (ringBuffer->isStarted() && ringBuffer->diffLastWriteNextOutput() == 0) {
            //
            // If we have started and now have run out of audio to send to the audio device,
            // this means we've starved and should restart.
            //
            ringBuffer->setStarted(false);

            _numStarves++;
            _packetsReceivedThisPlayback = 0;
            _wasStarved = 10;      // Frames for which to render the indication that the system was starved.
#ifdef SHOW_AUDIO_DEBUG
            qDebug("Starved, remaining samples = %d\n", ringBuffer->diffLastWriteNextOutput());
#endif
        } else {
            //
            // We are either already playing back, or we have enough audio to start playing back.
            //
            if (!ringBuffer->isStarted()) {
                ringBuffer->setStarted(true);
#ifdef SHOW_AUDIO_DEBUG
                qDebug("starting playback %0.1f msecs delayed, jitter = %d, pkts recvd: %d \n",
                       (usecTimestampNow() - usecTimestamp(&_firstPacketReceivedTime)) / 1000.0,
                       _jitterBufferSamples, _packetsReceivedThisPlayback);
#endif
            }

            //
            // play whatever we have in the audio buffer
            //

            // if we haven't fired off the flange effect, check if we should
            // TODO: lastMeasuredHeadYaw is now relative to body - check if this still works.
            int lastYawMeasured = fabsf(interfaceAvatar->getHeadYawRate());

            if (!_samplesLeftForFlange && lastYawMeasured > MIN_FLANGE_EFFECT_THRESHOLD) {
                // we should flange for one second
                if ((_lastYawMeasuredMaximum = std::max(_lastYawMeasuredMaximum, lastYawMeasured)) != lastYawMeasured) {
                    _lastYawMeasuredMaximum = std::min(_lastYawMeasuredMaximum, MIN_FLANGE_EFFECT_THRESHOLD);

                    _samplesLeftForFlange = SAMPLE_RATE;

                    _flangeIntensity = MIN_FLANGE_INTENSITY +
                        ((_lastYawMeasuredMaximum - MIN_FLANGE_EFFECT_THRESHOLD) /
                         (float)(MAX_FLANGE_EFFECT_THRESHOLD - MIN_FLANGE_EFFECT_THRESHOLD)) *
                        (1 - MIN_FLANGE_INTENSITY);

                    _flangeRate = FLANGE_BASE_RATE * _flangeIntensity;
                    _flangeWeight = MAX_FLANGE_SAMPLE_WEIGHT * _flangeIntensity;
                }
            }

            for (int s = 0; s < PACKET_LENGTH_SAMPLES_PER_CHANNEL; s++) {
                int leftSample = ringBuffer->getNextOutput()[s];
                int rightSample = ringBuffer->getNextOutput()[s + PACKET_LENGTH_SAMPLES_PER_CHANNEL];

                if (_samplesLeftForFlange > 0) {
                    float exponent = (SAMPLE_RATE - _samplesLeftForFlange - (SAMPLE_RATE / _flangeRate)) /
                        (SAMPLE_RATE / _flangeRate);
                    int sampleFlangeDelay = (SAMPLE_RATE / (1000 * _flangeIntensity)) * powf(2, exponent);

                    if (_samplesLeftForFlange != SAMPLE_RATE || s >= (SAMPLE_RATE / 2000)) {
                        // we have a delayed sample to add to this sample
                        int16_t* flangeFrame = ringBuffer->getNextOutput();
                        int flangeIndex = s - sampleFlangeDelay;

                        if (flangeIndex < 0) {
                            // we need to grab the flange sample from earlier in the buffer
                            flangeFrame = ringBuffer->getNextOutput() != ringBuffer->getBuffer()
                                ? ringBuffer->getNextOutput() - PACKET_LENGTH_SAMPLES
                                : ringBuffer->getNextOutput() + RING_BUFFER_LENGTH_SAMPLES - PACKET_LENGTH_SAMPLES;

                            flangeIndex = PACKET_LENGTH_SAMPLES_PER_CHANNEL + (s - sampleFlangeDelay);
                        }

                        int16_t leftFlangeSample = flangeFrame[flangeIndex];
                        int16_t rightFlangeSample = flangeFrame[flangeIndex + PACKET_LENGTH_SAMPLES_PER_CHANNEL];

                        leftSample = (1 - _flangeWeight) * leftSample + (_flangeWeight * leftFlangeSample);
                        rightSample = (1 - _flangeWeight) * rightSample + (_flangeWeight * rightFlangeSample);

                        _samplesLeftForFlange--;

                        if (_samplesLeftForFlange == 0) {
                            _lastYawMeasuredMaximum = 0;
                        }
                    }
                }
#ifndef TEST_AUDIO_LOOPBACK
                outputLeft[s] += leftSample;
                outputRight[s] += rightSample;
#else
                outputLeft[s] += inputLeft[s];
                outputRight[s] += inputLeft[s];
#endif
            }

            ringBuffer->setNextOutput(ringBuffer->getNextOutput() + PACKET_LENGTH_SAMPLES);

            if (ringBuffer->getNextOutput() == ringBuffer->getBuffer() + RING_BUFFER_LENGTH_SAMPLES) {
                ringBuffer->setNextOutput(ringBuffer->getBuffer());
            }
        }
    }

    eventuallySendRecvPing(inputLeft, outputLeft, outputRight);

    // add output (@speakers) data just written to the scope
    _scope->addSamples(1, outputLeft, BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
    _scope->addSamples(2, outputRight, BUFFER_LENGTH_SAMPLES_PER_CHANNEL);

    gettimeofday(&_lastCallbackTime, NULL);
}
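// Illustrative sketch, not part of the original file: the three-way playback decision that
// performIO makes above, restated against a minimal buffer view so the branches are easier
// to follow in isolation. RingBufferView, PlaybackAction, and decidePlayback are stand-in
// names introduced here for illustration only; the stereo scaling of the jitter allowance is
// assumed to be folded into jitterSamples.
struct RingBufferView {
    bool started;         // playback has already begun
    int samplesBuffered;  // samples between the next output and the end of the last write
};

enum class PlaybackAction { Wait, Restart, Play };

PlaybackAction decidePlayback(const RingBufferView& buffer, int packetLengthSamples, int jitterSamples) {
    if (!buffer.started && buffer.samplesBuffered < packetLengthSamples + jitterSamples) {
        return PlaybackAction::Wait;     // not enough audio has arrived yet - keep buffering
    }
    if (buffer.started && buffer.samplesBuffered == 0) {
        return PlaybackAction::Restart;  // starved - stop playback and re-buffer before resuming
    }
    return PlaybackAction::Play;         // enough audio - mix a packet's worth into the output
}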
void AudioInjector::injectAudio() {
    QByteArray soundByteArray = _sound->getByteArray();

    // make sure we actually have samples downloaded to inject
    if (soundByteArray.size()) {
        // give our sample byte array to the local audio interface, if we have it, so it can be handled locally
        if (_options.getLoopbackAudioInterface()) {
            // assume that localAudioInterface could be on a separate thread, use Qt::AutoConnection to handle properly
            QMetaObject::invokeMethod(_options.getLoopbackAudioInterface(), "handleAudioByteArray",
                                      Qt::AutoConnection,
                                      Q_ARG(QByteArray, soundByteArray));
        }

        NodeList* nodeList = NodeList::getInstance();

        // setup the packet for injected audio
        QByteArray injectAudioPacket = byteArrayWithPopulatedHeader(PacketTypeInjectAudio);
        QDataStream packetStream(&injectAudioPacket, QIODevice::Append);

        packetStream << QUuid::createUuid();

        // pack the flag for loopback
        uchar loopbackFlag = (uchar) (!_options.getLoopbackAudioInterface());
        packetStream << loopbackFlag;

        // pack the position for injected audio
        packetStream.writeRawData(reinterpret_cast<const char*>(&_options.getPosition()), sizeof(_options.getPosition()));

        // pack our orientation for injected audio
        packetStream.writeRawData(reinterpret_cast<const char*>(&_options.getOrientation()), sizeof(_options.getOrientation()));

        // pack zero for radius
        float radius = 0;
        packetStream << radius;

        // pack 255 for attenuation byte
        quint8 volume = MAX_INJECTOR_VOLUME * _options.getVolume();
        packetStream << volume;

        QElapsedTimer timer;
        timer.start();
        int nextFrame = 0;

        int currentSendPosition = 0;

        int numPreAudioDataBytes = injectAudioPacket.size();

        // loop to send off our audio in NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL byte chunks
        while (currentSendPosition < soundByteArray.size() && !_shouldStop) {

            int bytesToCopy = std::min(NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL,
                                       soundByteArray.size() - currentSendPosition);

            // resize the QByteArray to the right size
            injectAudioPacket.resize(numPreAudioDataBytes + bytesToCopy);

            // copy the next NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL bytes to the packet
            memcpy(injectAudioPacket.data() + numPreAudioDataBytes,
                   soundByteArray.data() + currentSendPosition, bytesToCopy);

            // grab our audio mixer from the NodeList, if it exists
            SharedNodePointer audioMixer = nodeList->soloNodeOfType(NodeType::AudioMixer);

            // send off this audio packet
            nodeList->writeDatagram(injectAudioPacket, audioMixer);

            currentSendPosition += bytesToCopy;

            // send two packets before the first sleep so the mixer can start playback right away
            if (currentSendPosition != bytesToCopy && currentSendPosition < soundByteArray.size()) {
                // not the first packet and not done
                // sleep for the appropriate time
                int usecToSleep = (++nextFrame * BUFFER_SEND_INTERVAL_USECS) - timer.nsecsElapsed() / 1000;

                if (usecToSleep > 0) {
                    usleep(usecToSleep);
                }
            }
        }
    }

    emit finished();
}
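// Illustrative sketch, not part of the original class: the pacing math used in the send loop
// above. usecsToSleepForFrame is a hypothetical helper; it restates the "target time for
// frame N minus time already elapsed" calculation that keeps injected packets going out
// roughly every BUFFER_SEND_INTERVAL_USECS.
#include <QElapsedTimer>
#include <algorithm>

qint64 usecsToSleepForFrame(int frameIndex, qint64 frameIntervalUsecs, const QElapsedTimer& timer) {
    qint64 targetUsecs = frameIndex * frameIntervalUsecs;    // when this frame should be sent
    qint64 elapsedUsecs = timer.nsecsElapsed() / 1000;       // how long we have actually been sending
    return std::max<qint64>(0, targetUsecs - elapsedUsecs);  // never ask for a negative sleep
}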
// display expanded or contracted stats
void Stats::display(const float* color, int horizontalOffset, float fps,
                    int packetsPerSecond, int bytesPerSecond, int voxelPacketsToProcess) {
    GLCanvas* glWidget = Application::getInstance()->getGLWidget();

    unsigned int backgroundColor = 0x33333399;
    int verticalOffset = 0, lines = 0;
    float scale = 0.10f;
    float rotation = 0.0f;
    int font = 2;

    QLocale locale(QLocale::English);
    std::stringstream voxelStats;

    if (_lastHorizontalOffset != horizontalOffset) {
        resetWidth(glWidget->width(), horizontalOffset);
        _lastHorizontalOffset = horizontalOffset;
    }

    glPointSize(1.0f);

    // we need to take one avatar out so we don't include ourselves
    int totalAvatars = Application::getInstance()->getAvatarManager().size() - 1;
    int totalServers = NodeList::getInstance()->size();

    lines = _expanded ? 5 : 3;
    int columnOneWidth = _generalStatsWidth;

    PerformanceTimer::tallyAllTimerRecords(); // do this even if we're not displaying them, so they don't stack up

    if (_expanded && Menu::getInstance()->isOptionChecked(MenuOption::DisplayTimingDetails)) {

        columnOneWidth = _generalStatsWidth + _pingStatsWidth + _geoStatsWidth; // make it 3 columns wide...
        // we will also include room for 1 line per timing record and a header of 4 lines
        lines += 4;

        const QMap<QString, PerformanceTimerRecord>& allRecords = PerformanceTimer::getAllTimerRecords();
        QMapIterator<QString, PerformanceTimerRecord> i(allRecords);
        while (i.hasNext()) {
            i.next();
            if (includeTimingRecord(i.key())) {
                lines++;
            }
        }
    }

    drawBackground(backgroundColor, horizontalOffset, 0, columnOneWidth, lines * STATS_PELS_PER_LINE + 10);
    horizontalOffset += 5;

    int columnOneHorizontalOffset = horizontalOffset;

    char serverNodes[30];
    sprintf(serverNodes, "Servers: %d", totalServers);
    char avatarNodes[30];
    sprintf(avatarNodes, "Avatars: %d", totalAvatars);
    char framesPerSecond[30];
    sprintf(framesPerSecond, "Framerate: %3.0f FPS", fps);

    verticalOffset += STATS_PELS_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, serverNodes, color);
    verticalOffset += STATS_PELS_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, avatarNodes, color);
    verticalOffset += STATS_PELS_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, framesPerSecond, color);

    if (_expanded) {
        char packetsPerSecondString[30];
        sprintf(packetsPerSecondString, "Pkts/sec: %d", packetsPerSecond);
        char averageMegabitsPerSecond[30];
        sprintf(averageMegabitsPerSecond, "Mbps: %3.2f", (float)bytesPerSecond * 8.0f / 1000000.0f);

        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, packetsPerSecondString, color);
        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, averageMegabitsPerSecond, color);
    }

    // TODO: the display of these timing details should all be moved to JavaScript
    if (_expanded && Menu::getInstance()->isOptionChecked(MenuOption::DisplayTimingDetails)) {
        // Timing details...
        const int TIMER_OUTPUT_LINE_LENGTH = 1000;
        char perfLine[TIMER_OUTPUT_LINE_LENGTH];
        verticalOffset += STATS_PELS_PER_LINE * 4; // skip 4 lines to be under the other columns
        drawText(columnOneHorizontalOffset, verticalOffset, scale, rotation, font,
                 "-------------------------------------------------------- Function "
                 "------------------------------------------------------- --msecs- -calls--", color);

        // First iterate all the records, and for the ones that should be included, insert them into
        // a new Map sorted by average time...
        QMap<float, QString> sortedRecords;
        const QMap<QString, PerformanceTimerRecord>& allRecords = PerformanceTimer::getAllTimerRecords();
        QMapIterator<QString, PerformanceTimerRecord> i(allRecords);

        while (i.hasNext()) {
            i.next();
            if (includeTimingRecord(i.key())) {
                float averageTime = (float)i.value().getMovingAverage() / (float)USECS_PER_MSEC;
                sortedRecords.insertMulti(averageTime, i.key());
            }
        }

        QMapIterator<float, QString> j(sortedRecords);
        j.toBack();
        while (j.hasPrevious()) {
            j.previous();
            QString functionName = j.value();
            const PerformanceTimerRecord& record = allRecords.value(functionName);

            sprintf(perfLine, "%120s: %8.4f [%6llu]", qPrintable(functionName),
                    (float)record.getMovingAverage() / (float)USECS_PER_MSEC,
                    record.getCount());

            verticalOffset += STATS_PELS_PER_LINE;
            drawText(columnOneHorizontalOffset, verticalOffset, scale, rotation, font, perfLine, color);
        }
    }

    verticalOffset = 0;
    horizontalOffset = _lastHorizontalOffset + _generalStatsWidth + 1;

    if (Menu::getInstance()->isOptionChecked(MenuOption::TestPing)) {
        int pingAudio = -1, pingAvatar = -1, pingVoxel = -1, pingVoxelMax = -1;

        NodeList* nodeList = NodeList::getInstance();
        SharedNodePointer audioMixerNode = nodeList->soloNodeOfType(NodeType::AudioMixer);
        SharedNodePointer avatarMixerNode = nodeList->soloNodeOfType(NodeType::AvatarMixer);

        pingAudio = audioMixerNode ? audioMixerNode->getPingMs() : -1;
        pingAvatar = avatarMixerNode ? avatarMixerNode->getPingMs() : -1;

        // Now handle voxel servers, since there could be more than one, we average their ping times
        unsigned long totalPingVoxel = 0;
        int voxelServerCount = 0;

        foreach (const SharedNodePointer& node, nodeList->getNodeHash()) {
            // TODO: this should also support entities
            if (node->getType() == NodeType::VoxelServer) {
                totalPingVoxel += node->getPingMs();
                voxelServerCount++;
                if (pingVoxelMax < node->getPingMs()) {
                    pingVoxelMax = node->getPingMs();
                }
            }
        }

        if (voxelServerCount) {
            pingVoxel = totalPingVoxel / voxelServerCount;
        }

        lines = _expanded ? 4 : 3;

        // only draw our background if column one didn't draw a wide background
        if (columnOneWidth == _generalStatsWidth) {
            drawBackground(backgroundColor, horizontalOffset, 0, _pingStatsWidth, lines * STATS_PELS_PER_LINE + 10);
        }
        horizontalOffset += 5;

        char audioPing[30];
        if (pingAudio >= 0) {
            sprintf(audioPing, "Audio ping: %d", pingAudio);
        } else {
            sprintf(audioPing, "Audio ping: --");
        }

        char avatarPing[30];
        if (pingAvatar >= 0) {
            sprintf(avatarPing, "Avatar ping: %d", pingAvatar);
        } else {
            sprintf(avatarPing, "Avatar ping: --");
        }

        char voxelAvgPing[30];
        if (pingVoxel >= 0) {
            sprintf(voxelAvgPing, "Voxel avg ping: %d", pingVoxel);
        } else {
            sprintf(voxelAvgPing, "Voxel avg ping: --");
        }

        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, audioPing, color);
        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, avatarPing, color);
        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, voxelAvgPing, color);

        if (_expanded) {
            char voxelMaxPing[30];
            if (pingVoxel >= 0) {  // Average is only meaningful if pingVoxel is valid.
                sprintf(voxelMaxPing, "Voxel max ping: %d", pingVoxelMax);
            } else {
                sprintf(voxelMaxPing, "Voxel max ping: --");
            }

            verticalOffset += STATS_PELS_PER_LINE;
            drawText(horizontalOffset, verticalOffset, scale, rotation, font, voxelMaxPing, color);
        }

        verticalOffset = 0;
        horizontalOffset = _lastHorizontalOffset + _generalStatsWidth + _pingStatsWidth + 2;
    }
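// Illustrative sketch, not part of the original file: the slowest-first ordering of the timing
// records above comes from QMap::insertMulti followed by reverse iteration. The same ordering
// can be expressed with a std::multimap keyed on the average time and a std::greater comparator;
// printSortedAverages is a hypothetical helper shown only to make the sort direction explicit.
#include <cstdio>
#include <functional>
#include <map>
#include <string>

void printSortedAverages(const std::map<std::string, float>& averageMsecsByName) {
    // key on the average so ties are kept; std::greater gives slowest-first iteration
    std::multimap<float, std::string, std::greater<float>> sorted;
    for (const auto& entry : averageMsecsByName) {
        sorted.insert({entry.second, entry.first});
    }
    for (const auto& entry : sorted) {
        printf("%s: %.4f msecs\n", entry.second.c_str(), entry.first);
    }
}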
int main(int argc, const char* argv[]) {

    qInstallMessageHandler(Logging::verboseMessageHandler);

    NodeList* nodeList = NodeList::createInstance(NODE_TYPE_DOMAIN, DOMAIN_LISTEN_PORT);

    setvbuf(stdout, NULL, _IOLBF, 0);

    ssize_t receivedBytes = 0;
    char nodeType = '\0';

    unsigned char packetData[MAX_PACKET_SIZE];   // buffer for incoming packets
    unsigned char broadcastPacket[MAX_PACKET_SIZE];

    unsigned char* currentBufferPos;
    unsigned char* startPointer;

    sockaddr_in nodePublicAddress, nodeLocalAddress, replyDestinationSocket;
    nodeLocalAddress.sin_family = AF_INET;

    in_addr_t serverLocalAddress = getLocalAddress();

    nodeList->startSilentNodeRemovalThread();

    timeval lastStatSendTime = {};

    const char ASSIGNMENT_SERVER_OPTION[] = "-a";

    // grab the overriden assignment-server hostname from argv, if it exists
    const char* customAssignmentServer = getCmdOption(argc, argv, ASSIGNMENT_SERVER_OPTION);
    if (customAssignmentServer) {
        sockaddr_in customAssignmentSocket = socketForHostnameAndHostOrderPort(customAssignmentServer, ASSIGNMENT_SERVER_PORT);
        nodeList->setAssignmentServerSocket((sockaddr*) &customAssignmentSocket);
    }

    // use a map to keep track of iterations of silence for assignment creation requests
    const long long GLOBAL_ASSIGNMENT_REQUEST_INTERVAL_USECS = 1 * 1000 * 1000;
    timeval lastGlobalAssignmentRequest = {};

    // as a domain-server we will always want an audio mixer and avatar mixer
    // setup the create assignments for those
    Assignment audioMixerAssignment(Assignment::CreateCommand, Assignment::AudioMixerType, Assignment::LocalLocation);
    Assignment avatarMixerAssignment(Assignment::CreateCommand, Assignment::AvatarMixerType, Assignment::LocalLocation);

    // construct a local socket to send with our created assignments to the global AS
    sockaddr_in localSocket = {};
    localSocket.sin_family = AF_INET;
    localSocket.sin_port = htons(nodeList->getInstance()->getNodeSocket()->getListeningPort());
    localSocket.sin_addr.s_addr = serverLocalAddress;

    // setup the mongoose web server
    struct mg_context* ctx;
    struct mg_callbacks callbacks = {};

    // list of options. Last element must be NULL.
    const char* options[] = {"listening_ports", "8080",
                             "document_root", "./resources/web", NULL};

    callbacks.begin_request = mongooseRequestHandler;
    callbacks.upload = mongooseUploadHandler;

    // Start the web server.
    ctx = mg_start(&callbacks, NULL, options);

    while (true) {
        ::assignmentQueueMutex.lock();
        // check if our audio-mixer or avatar-mixer are dead and we don't have existing assignments in the queue
        // so we can add those assignments back to the front of the queue since they are high-priority
        if (!nodeList->soloNodeOfType(NODE_TYPE_AVATAR_MIXER)
            && std::find(::assignmentQueue.begin(), ::assignmentQueue.end(), &avatarMixerAssignment) == ::assignmentQueue.end()) {
            qDebug("Missing an avatar mixer and assignment not in queue. Adding.\n");
            ::assignmentQueue.push_front(&avatarMixerAssignment);
        }

        if (!nodeList->soloNodeOfType(NODE_TYPE_AUDIO_MIXER)
            && std::find(::assignmentQueue.begin(), ::assignmentQueue.end(), &audioMixerAssignment) == ::assignmentQueue.end()) {
            qDebug("Missing an audio mixer and assignment not in queue. Adding.\n");
            ::assignmentQueue.push_front(&audioMixerAssignment);
        }
        ::assignmentQueueMutex.unlock();

        while (nodeList->getNodeSocket()->receive((sockaddr*) &nodePublicAddress, packetData, &receivedBytes)
               && packetVersionMatch(packetData)) {
            if (packetData[0] == PACKET_TYPE_DOMAIN_REPORT_FOR_DUTY || packetData[0] == PACKET_TYPE_DOMAIN_LIST_REQUEST) {
                // this is an RFD or domain list request packet, and there is a version match
                std::map<char, Node*> newestSoloNodes;

                int numBytesSenderHeader = numBytesForPacketHeader(packetData);

                nodeType = *(packetData + numBytesSenderHeader);
                int numBytesSocket = unpackSocket(packetData + numBytesSenderHeader + sizeof(NODE_TYPE),
                                                  (sockaddr*) &nodeLocalAddress);

                replyDestinationSocket = nodePublicAddress;

                // check the node public address
                // if it matches our local address
                // or if it's the loopback address we're on the same box
                if (nodePublicAddress.sin_addr.s_addr == serverLocalAddress ||
                    nodePublicAddress.sin_addr.s_addr == htonl(INADDR_LOOPBACK)) {
                    nodePublicAddress.sin_addr.s_addr = 0;
                }

                Node* newNode = nodeList->addOrUpdateNode((sockaddr*) &nodePublicAddress,
                                                          (sockaddr*) &nodeLocalAddress,
                                                          nodeType,
                                                          nodeList->getLastNodeID());

                // if addOrUpdateNode returns NULL this was a solo node we already have, don't talk back to it
                if (newNode) {
                    if (newNode->getNodeID() == nodeList->getLastNodeID()) {
                        nodeList->increaseNodeID();
                    }

                    int numHeaderBytes = populateTypeAndVersion(broadcastPacket, PACKET_TYPE_DOMAIN);

                    currentBufferPos = broadcastPacket + numHeaderBytes;
                    startPointer = currentBufferPos;

                    unsigned char* nodeTypesOfInterest = packetData + numBytesSenderHeader + sizeof(NODE_TYPE)
                        + numBytesSocket + sizeof(unsigned char);
                    int numInterestTypes = *(nodeTypesOfInterest - 1);

                    if (numInterestTypes > 0) {
                        // if the node has sent no types of interest, assume they want nothing but their own ID back
                        for (NodeList::iterator node = nodeList->begin(); node != nodeList->end(); node++) {
                            if (!node->matches((sockaddr*) &nodePublicAddress, (sockaddr*) &nodeLocalAddress, nodeType) &&
                                memchr(nodeTypesOfInterest, node->getType(), numInterestTypes)) {
                                // this is not the node themselves
                                // and this is an node of a type in the passed node types of interest
                                // or the node did not pass us any specific types they are interested in

                                if (memchr(SOLO_NODE_TYPES, node->getType(), sizeof(SOLO_NODE_TYPES)) == NULL) {
                                    // this is an node of which there can be multiple, just add them to the packet
                                    // don't send avatar nodes to other avatars, that will come from avatar mixer
                                    if (nodeType != NODE_TYPE_AGENT || node->getType() != NODE_TYPE_AGENT) {
                                        currentBufferPos = addNodeToBroadcastPacket(currentBufferPos, &(*node));
                                    }
                                } else {
                                    // solo node, we need to only send newest
                                    if (newestSoloNodes[node->getType()] == NULL ||
                                        newestSoloNodes[node->getType()]->getWakeMicrostamp() < node->getWakeMicrostamp()) {
                                        // we have to set the newer solo node to add it to the broadcast later
                                        newestSoloNodes[node->getType()] = &(*node);
                                    }
                                }
                            }
                        }

                        for (std::map<char, Node*>::iterator soloNode = newestSoloNodes.begin();
                             soloNode != newestSoloNodes.end();
                             soloNode++) {
                            // this is the newest alive solo node, add them to the packet
                            currentBufferPos = addNodeToBroadcastPacket(currentBufferPos, soloNode->second);
                        }
                    }

                    // update last receive to now
                    uint64_t timeNow = usecTimestampNow();
                    newNode->setLastHeardMicrostamp(timeNow);

                    if (packetData[0] == PACKET_TYPE_DOMAIN_REPORT_FOR_DUTY
                        && memchr(SOLO_NODE_TYPES, nodeType, sizeof(SOLO_NODE_TYPES))) {
                        newNode->setWakeMicrostamp(timeNow);
                    }

                    // add the node ID to the end of the pointer
                    currentBufferPos += packNodeId(currentBufferPos, newNode->getNodeID());

                    // send the constructed list back to this node
                    nodeList->getNodeSocket()->send((sockaddr*) &replyDestinationSocket,
                                                    broadcastPacket,
                                                    (currentBufferPos - startPointer) + numHeaderBytes);
                }
            } else if (packetData[0] == PACKET_TYPE_REQUEST_ASSIGNMENT) {
                qDebug("Received a request for assignment.\n");

                ::assignmentQueueMutex.lock();

                // this is an unassigned client talking to us directly for an assignment
                // go through our queue and see if there are any assignments to give out
                std::deque<Assignment*>::iterator assignment = ::assignmentQueue.begin();

                while (assignment != ::assignmentQueue.end()) {
                    // give this assignment out, no conditions stop us from giving it to the local assignment client
                    int numHeaderBytes = populateTypeAndVersion(broadcastPacket, PACKET_TYPE_CREATE_ASSIGNMENT);
                    int numAssignmentBytes = (*assignment)->packToBuffer(broadcastPacket + numHeaderBytes);

                    nodeList->getNodeSocket()->send((sockaddr*) &nodePublicAddress,
                                                    broadcastPacket,
                                                    numHeaderBytes + numAssignmentBytes);

                    if ((*assignment)->getType() == Assignment::AgentType) {
                        // if this is a script assignment we need to delete it to avoid a memory leak
                        delete *assignment;
                    }

                    // remove the assignment from the queue
                    ::assignmentQueue.erase(assignment);

                    // stop looping, we've handed out an assignment
                    break;
                }

                ::assignmentQueueMutex.unlock();
            }
        }

        // if GLOBAL_ASSIGNMENT_REQUEST_INTERVAL_USECS have passed since last global assignment request then fire off another
        if (usecTimestampNow() - usecTimestamp(&lastGlobalAssignmentRequest) >= GLOBAL_ASSIGNMENT_REQUEST_INTERVAL_USECS) {
            gettimeofday(&lastGlobalAssignmentRequest, NULL);

            ::assignmentQueueMutex.lock();

            // go through our queue and see if there are any assignments to send to the global assignment server
            std::deque<Assignment*>::iterator assignment = ::assignmentQueue.begin();

            while (assignment != ::assignmentQueue.end()) {
                if ((*assignment)->getLocation() != Assignment::LocalLocation) {
                    // attach our local socket to the assignment so the assignment-server can optionally hand it out
                    (*assignment)->setAttachedLocalSocket((sockaddr*) &localSocket);

                    nodeList->sendAssignment(*(*assignment));

                    if ((*assignment)->getType() == Assignment::AgentType) {
                        // if this is a script assignment we need to delete it to avoid a memory leak
                        delete *assignment;
                    }

                    // remove the assignment from the queue
                    ::assignmentQueue.erase(assignment);

                    // stop looping, we've handed out an assignment
                    break;
                } else {
                    // push forward the iterator to check the next assignment
                    assignment++;
                }
            }

            ::assignmentQueueMutex.unlock();
        }

        if (Logging::shouldSendStats()) {
            if (usecTimestampNow() - usecTimestamp(&lastStatSendTime) >= (NODE_COUNT_STAT_INTERVAL_MSECS * 1000)) {
                // time to send our count of nodes and servers to logstash
                const char NODE_COUNT_LOGSTASH_KEY[] = "ds-node-count";

                Logging::stashValue(STAT_TYPE_TIMER, NODE_COUNT_LOGSTASH_KEY, nodeList->getNumAliveNodes());

                gettimeofday(&lastStatSendTime, NULL);
            }
        }
    }

    return 0;
}
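// Illustrative sketch, not part of the original file: the "re-queue a missing static assignment"
// check at the top of the main loop above. AssignmentStub and requeueIfMissing are hypothetical
// stand-ins; the point is that the static audio/avatar mixer assignment is only pushed back to the
// front of the deque when the mixer is gone AND the assignment is not already waiting in the queue.
#include <algorithm>
#include <deque>

struct AssignmentStub {};  // placeholder for the real Assignment type

void requeueIfMissing(std::deque<AssignmentStub*>& queue, AssignmentStub* staticAssignment, bool mixerAlive) {
    bool alreadyQueued = std::find(queue.begin(), queue.end(), staticAssignment) != queue.end();
    if (!mixerAlive && !alreadyQueued) {
        queue.push_front(staticAssignment);  // high priority: hand it out before anything else
    }
}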
// display expanded or contracted stats
void Stats::display(const float* color, int horizontalOffset, float fps,
                    int packetsPerSecond, int bytesPerSecond, int voxelPacketsToProcess) {
    QGLWidget* glWidget = Application::getInstance()->getGLWidget();

    unsigned int backgroundColor = 0x33333399;
    int verticalOffset = 0, lines = 0;

    QLocale locale(QLocale::English);
    std::stringstream voxelStats;

    if (_lastHorizontalOffset != horizontalOffset) {
        resetWidth(glWidget->width(), horizontalOffset);
        _lastHorizontalOffset = horizontalOffset;
    }

    glPointSize(1.0f);

    // we need to take one avatar out so we don't include ourselves
    int totalAvatars = Application::getInstance()->getAvatarManager().size() - 1;
    int totalServers = NodeList::getInstance()->size();

    lines = _expanded ? 5 : 3;
    drawBackground(backgroundColor, horizontalOffset, 0, _generalStatsWidth, lines * STATS_PELS_PER_LINE + 10);
    horizontalOffset += 5;

    char serverNodes[30];
    sprintf(serverNodes, "Servers: %d", totalServers);
    char avatarNodes[30];
    sprintf(avatarNodes, "Avatars: %d", totalAvatars);
    char framesPerSecond[30];
    sprintf(framesPerSecond, "Framerate: %3.0f FPS", fps);

    verticalOffset += STATS_PELS_PER_LINE;
    drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, serverNodes, color);
    verticalOffset += STATS_PELS_PER_LINE;
    drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, avatarNodes, color);
    verticalOffset += STATS_PELS_PER_LINE;
    drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, framesPerSecond, color);

    if (_expanded) {
        char packetsPerSecondString[30];
        sprintf(packetsPerSecondString, "Pkts/sec: %d", packetsPerSecond);
        char averageMegabitsPerSecond[30];
        sprintf(averageMegabitsPerSecond, "Mbps: %3.2f", (float)bytesPerSecond * 8.f / 1000000.f);

        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, packetsPerSecondString, color);
        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, averageMegabitsPerSecond, color);
    }

    verticalOffset = 0;
    horizontalOffset = _lastHorizontalOffset + _generalStatsWidth + 1;

    if (Menu::getInstance()->isOptionChecked(MenuOption::TestPing)) {
        int pingAudio = 0, pingAvatar = 0, pingVoxel = 0, pingVoxelMax = 0;

        NodeList* nodeList = NodeList::getInstance();
        SharedNodePointer audioMixerNode = nodeList->soloNodeOfType(NodeType::AudioMixer);
        SharedNodePointer avatarMixerNode = nodeList->soloNodeOfType(NodeType::AvatarMixer);

        pingAudio = audioMixerNode ? audioMixerNode->getPingMs() : 0;
        pingAvatar = avatarMixerNode ? avatarMixerNode->getPingMs() : 0;

        // Now handle voxel servers, since there could be more than one, we average their ping times
        unsigned long totalPingVoxel = 0;
        int voxelServerCount = 0;

        foreach (const SharedNodePointer& node, nodeList->getNodeHash()) {
            if (node->getType() == NodeType::VoxelServer) {
                totalPingVoxel += node->getPingMs();
                voxelServerCount++;
                if (pingVoxelMax < node->getPingMs()) {
                    pingVoxelMax = node->getPingMs();
                }
            }
        }

        if (voxelServerCount) {
            pingVoxel = totalPingVoxel / voxelServerCount;
        }

        lines = _expanded ? 4 : 3;
        drawBackground(backgroundColor, horizontalOffset, 0, _pingStatsWidth, lines * STATS_PELS_PER_LINE + 10);
        horizontalOffset += 5;

        Audio* audio = Application::getInstance()->getAudio();
        char audioJitter[30];
        sprintf(audioJitter,
                "Buffer msecs %.1f",
                (float) (audio->getNetworkBufferLengthSamplesPerChannel() + (float) audio->getJitterBufferSamples()) /
                (float) audio->getNetworkSampleRate() * 1000.f);
        drawText(30, glWidget->height() - 22, 0.10f, 0.f, 2.f, audioJitter, color);

        char audioPing[30];
        sprintf(audioPing, "Audio ping: %d", pingAudio);
        char avatarPing[30];
        sprintf(avatarPing, "Avatar ping: %d", pingAvatar);
        char voxelAvgPing[30];
        sprintf(voxelAvgPing, "Voxel avg ping: %d", pingVoxel);

        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, audioPing, color);
        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, avatarPing, color);
        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, voxelAvgPing, color);

        if (_expanded) {
            char voxelMaxPing[30];
            sprintf(voxelMaxPing, "Voxel max ping: %d", pingVoxelMax);

            verticalOffset += STATS_PELS_PER_LINE;
            drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, voxelMaxPing, color);
        }

        verticalOffset = 0;
        horizontalOffset = _lastHorizontalOffset + _generalStatsWidth + _pingStatsWidth + 2;
    }
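// Illustrative sketch, not part of the original file: the "Buffer msecs" value drawn above is the
// network buffer length plus the jitter allowance, converted from samples per channel to
// milliseconds at the network sample rate. bufferLengthMsecs is a hypothetical helper restating
// that conversion.
float bufferLengthMsecs(int bufferSamplesPerChannel, int jitterBufferSamples, int sampleRateHz) {
    return (bufferSamplesPerChannel + jitterBufferSamples) * 1000.0f / (float) sampleRateHz;
}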