void OctreeQueryNode::resetOctreePacket() {
    // if shutting down, return immediately
    if (_isShuttingDown) {
        return;
    }

    // Whenever we call this, we will keep a copy of the last packet, so we can determine if the last packet has
    // changed since we last reset it. Since we know that no two packets can ever be identical without representing
    // the same scene information (e.g. the root node packet of a static scene), we can use this as a strategy for
    // reducing the packet send rate.
    _lastOctreePacketLength = _octreePacket->getPayloadSize();
    memcpy(_lastOctreePayload.data(), _octreePacket->getPayload(), _lastOctreePacketLength);

    // Color and compression are always enabled now, so set both flag bits unconditionally.
    OCTREE_PACKET_FLAGS flags = 0;
    setAtBit(flags, PACKET_IS_COLOR_BIT); // always color
    setAtBit(flags, PACKET_IS_COMPRESSED_BIT); // always compressed

    _octreePacket->reset();

    // pack in flags
    _octreePacket->writePrimitive(flags);

    // pack in sequence number
    _octreePacket->writePrimitive(_sequenceNumber);

    // pack in timestamp
    OCTREE_PACKET_SENT_TIME now = usecTimestampNow();
    _octreePacket->writePrimitive(now);

    _octreePacketWaiting = false;
}
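// A minimal sketch of what the receiving side of this header looks like, assuming the payload begins with the three
// primitives written above in the same order, and that OCTREE_PACKET_FLAGS is one byte, OCTREE_PACKET_SEQUENCE is
// 16 bits, and OCTREE_PACKET_SENT_TIME is a 64-bit microsecond timestamp. The type widths, the OctreeHeader struct,
// and readOctreeHeader() are illustrative assumptions, not the codebase's actual declarations.
#include <cstdint>
#include <cstring>

struct OctreeHeader {
    uint8_t flags;
    uint16_t sequence;
    uint64_t sentTimeUsecs;
};

// Parse the header fields in the same order resetOctreePacket() wrote them.
inline OctreeHeader readOctreeHeader(const unsigned char* payload) {
    OctreeHeader header;
    size_t offset = 0;
    memcpy(&header.flags, payload + offset, sizeof(header.flags));
    offset += sizeof(header.flags);
    memcpy(&header.sequence, payload + offset, sizeof(header.sequence));
    offset += sizeof(header.sequence);
    memcpy(&header.sentTimeUsecs, payload + offset, sizeof(header.sentTimeUsecs));
    return header;
}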
void OctreeQueryNode::resetOctreePacket() {
    // if shutting down, return immediately
    if (_isShuttingDown) {
        return;
    }

    // Whenever we call this, we will keep a copy of the last packet, so we can determine if the last packet has
    // changed since we last reset it. Since we know that no two packets can ever be identical without representing
    // the same scene information (e.g. the root node packet of a static scene), we can use this as a strategy for
    // reducing the packet send rate.
    _lastOctreePacketLength = getPacketLength();
    memcpy(_lastOctreePacket, _octreePacket, _lastOctreePacketLength);

    // Use the client's requested color and compression state for this packet.
    _currentPacketIsColor = getWantColor();
    _currentPacketIsCompressed = getWantCompression();
    OCTREE_PACKET_FLAGS flags = 0;
    if (_currentPacketIsColor) {
        setAtBit(flags, PACKET_IS_COLOR_BIT);
    }
    if (_currentPacketIsCompressed) {
        setAtBit(flags, PACKET_IS_COMPRESSED_BIT);
    }

    _octreePacketAvailableBytes = MAX_PACKET_SIZE;
    int numBytesPacketHeader = populatePacketHeader(reinterpret_cast<char*>(_octreePacket), _myPacketType);
    _octreePacketAt = _octreePacket + numBytesPacketHeader;
    _octreePacketAvailableBytes -= numBytesPacketHeader;

    // pack in flags
    OCTREE_PACKET_FLAGS* flagsAt = (OCTREE_PACKET_FLAGS*)_octreePacketAt;
    *flagsAt = flags;
    _octreePacketAt += sizeof(OCTREE_PACKET_FLAGS);
    _octreePacketAvailableBytes -= sizeof(OCTREE_PACKET_FLAGS);

    // pack in sequence number
    OCTREE_PACKET_SEQUENCE* sequenceAt = (OCTREE_PACKET_SEQUENCE*)_octreePacketAt;
    *sequenceAt = _sequenceNumber;
    _octreePacketAt += sizeof(OCTREE_PACKET_SEQUENCE);
    _octreePacketAvailableBytes -= sizeof(OCTREE_PACKET_SEQUENCE);

    // pack in timestamp
    OCTREE_PACKET_SENT_TIME now = usecTimestampNow();
    OCTREE_PACKET_SENT_TIME* timeAt = (OCTREE_PACKET_SENT_TIME*)_octreePacketAt;
    *timeAt = now;
    _octreePacketAt += sizeof(OCTREE_PACKET_SENT_TIME);
    _octreePacketAvailableBytes -= sizeof(OCTREE_PACKET_SENT_TIME);

    _octreePacketWaiting = false;
}
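// The older version above repeats the same three steps for each field: store a primitive through a casted pointer,
// advance the write cursor, and shrink the available-byte count. A minimal sketch of how that pattern could be
// factored into one helper, assuming the available-byte counter is an int; packPrimitive() is a hypothetical name,
// not an existing API in the codebase.
#include <cstring>

template <typename T>
inline void packPrimitive(unsigned char*& cursor, int& availableBytes, const T& value) {
    memcpy(cursor, &value, sizeof(T)); // memcpy avoids unaligned stores through casted pointers
    cursor += sizeof(T);
    availableBytes -= sizeof(T);
}

// Usage, mirroring the flag/sequence/timestamp writes above:
//     packPrimitive(_octreePacketAt, _octreePacketAvailableBytes, flags);
//     packPrimitive(_octreePacketAt, _octreePacketAvailableBytes, _sequenceNumber);
//     packPrimitive(_octreePacketAt, _octreePacketAvailableBytes, now);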
int OctreeQuery::getBroadcastData(unsigned char* destinationBuffer) {
    unsigned char* bufferStart = destinationBuffer;

    // TODO: DRY this up to a shared method that can pack any type given the number of bytes
    // and return the number of bytes to push the pointer

    // camera details
    memcpy(destinationBuffer, &_cameraPosition, sizeof(_cameraPosition));
    destinationBuffer += sizeof(_cameraPosition);
    destinationBuffer += packOrientationQuatToBytes(destinationBuffer, _cameraOrientation);
    destinationBuffer += packFloatAngleToTwoByte(destinationBuffer, _cameraFov);
    destinationBuffer += packFloatRatioToTwoByte(destinationBuffer, _cameraAspectRatio);
    destinationBuffer += packClipValueToTwoByte(destinationBuffer, _cameraNearClip);
    destinationBuffer += packClipValueToTwoByte(destinationBuffer, _cameraFarClip);
    memcpy(destinationBuffer, &_cameraEyeOffsetPosition, sizeof(_cameraEyeOffsetPosition));
    destinationBuffer += sizeof(_cameraEyeOffsetPosition);

    // bitmask of items narrower than a byte
    unsigned char bitItems = 0;

    // NOTE: we need to keep these here so that new clients can talk to old servers. Once we know that all clients
    // and servers have been updated, we could remove these bits. New servers will always force these features on
    // old clients even if they don't ask for them (which old clients handle properly). New clients will always ask
    // for these so that old servers will use these features.
    setAtBit(bitItems, WANT_LOW_RES_MOVING_BIT);
    setAtBit(bitItems, WANT_COLOR_AT_BIT);
    setAtBit(bitItems, WANT_DELTA_AT_BIT);
    setAtBit(bitItems, WANT_COMPRESSION);

    *destinationBuffer++ = bitItems;

    // desired max octree packets per second (PPS)
    memcpy(destinationBuffer, &_maxQueryPPS, sizeof(_maxQueryPPS));
    destinationBuffer += sizeof(_maxQueryPPS);

    // desired voxelSizeScale
    memcpy(destinationBuffer, &_octreeElementSizeScale, sizeof(_octreeElementSizeScale));
    destinationBuffer += sizeof(_octreeElementSizeScale);

    // desired boundaryLevelAdjust
    memcpy(destinationBuffer, &_boundaryLevelAdjust, sizeof(_boundaryLevelAdjust));
    destinationBuffer += sizeof(_boundaryLevelAdjust);

    memcpy(destinationBuffer, &_cameraCenterRadius, sizeof(_cameraCenterRadius));
    destinationBuffer += sizeof(_cameraCenterRadius);

    return destinationBuffer - bufferStart;
}
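// packFloatAngleToTwoByte / packFloatRatioToTwoByte / packClipValueToTwoByte above all trade precision for
// bandwidth by squeezing a float into two bytes. A minimal sketch of that general idea: quantize a float with a
// known range into a uint16_t and back. The function names, ranges, and rounding here are illustrative assumptions,
// not the actual implementations of those helpers; they only mirror the convention of returning the bytes written
// so the caller can advance its pointer.
#include <cstdint>
#include <cstring>

inline int packFloatToTwoByteSketch(unsigned char* buffer, float value, float minValue, float maxValue) {
    float normalized = (value - minValue) / (maxValue - minValue); // map to 0..1
    if (normalized < 0.0f) { normalized = 0.0f; }
    if (normalized > 1.0f) { normalized = 1.0f; }
    uint16_t quantized = (uint16_t)(normalized * 65535.0f + 0.5f);
    memcpy(buffer, &quantized, sizeof(quantized));
    return sizeof(quantized); // bytes written
}

inline float unpackTwoByteToFloatSketch(const unsigned char* buffer, float minValue, float maxValue) {
    uint16_t quantized;
    memcpy(&quantized, buffer, sizeof(quantized));
    return minValue + (quantized / 65535.0f) * (maxValue - minValue);
}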
void sendEnvironmentPacket(const SharedNodePointer& node, AudioMixerClientData& data) {
    bool hasReverb = false;
    float reverbTime, wetLevel;

    auto& reverbSettings = AudioMixer::getReverbSettings();
    auto& audioZones = AudioMixer::getAudioZones();

    AvatarAudioStream* stream = data.getAvatarAudioStream();
    glm::vec3 streamPosition = stream->getPosition();

    // find reverb properties
    for (int i = 0; i < reverbSettings.size(); ++i) {
        AABox box = audioZones[reverbSettings[i].zone];
        if (box.contains(streamPosition)) {
            hasReverb = true;
            reverbTime = reverbSettings[i].reverbTime;
            wetLevel = reverbSettings[i].wetLevel;
            break;
        }
    }

    // check if data changed
    bool dataChanged = (stream->hasReverb() != hasReverb) ||
        (stream->hasReverb() && (stream->getRevebTime() != reverbTime || stream->getWetLevel() != wetLevel));
    if (dataChanged) {
        // update stream
        if (hasReverb) {
            stream->setReverb(reverbTime, wetLevel);
        } else {
            stream->clearReverb();
        }
    }

    // send packet at change or every so often
    float CHANCE_OF_SEND = 0.01f;
    bool sendData = dataChanged || (randFloat() < CHANCE_OF_SEND);

    if (sendData) {
        // size the packet
        unsigned char bitset = 0;
        int packetSize = sizeof(bitset);
        if (hasReverb) {
            packetSize += sizeof(reverbTime) + sizeof(wetLevel);
        }

        // write the packet
        auto envPacket = NLPacket::create(PacketType::AudioEnvironment, packetSize);
        if (hasReverb) {
            setAtBit(bitset, HAS_REVERB_BIT);
        }
        envPacket->writePrimitive(bitset);
        if (hasReverb) {
            envPacket->writePrimitive(reverbTime);
            envPacket->writePrimitive(wetLevel);
        }

        // send the packet
        DependencyManager::get<NodeList>()->sendPacket(std::move(envPacket), *node);
    }
}
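// A minimal sketch of the receiving side of the AudioEnvironment payload written above: one bitset byte, followed
// by reverbTime and wetLevel as floats only when reverb is flagged. Since HAS_REVERB_BIT is the only bit the sender
// ever sets, the sketch simply treats any nonzero bitset as "has reverb"; parseAudioEnvironmentSketch() is an
// illustrative stand-in, not the client's actual parser.
#include <cstring>

inline void parseAudioEnvironmentSketch(const unsigned char* payload, bool& hasReverb,
                                        float& reverbTime, float& wetLevel) {
    unsigned char bitset = payload[0];
    hasReverb = (bitset != 0); // only HAS_REVERB_BIT is ever written into this byte
    if (hasReverb) {
        memcpy(&reverbTime, payload + 1, sizeof(reverbTime));
        memcpy(&wetLevel, payload + 1 + sizeof(reverbTime), sizeof(wetLevel));
    }
}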
int OctreeQuery::getBroadcastData(unsigned char* destinationBuffer) {
    unsigned char* bufferStart = destinationBuffer;

    // TODO: DRY this up to a shared method that can pack any type given the number of bytes
    // and return the number of bytes to push the pointer

    // camera details
    memcpy(destinationBuffer, &_cameraPosition, sizeof(_cameraPosition));
    destinationBuffer += sizeof(_cameraPosition);
    destinationBuffer += packOrientationQuatToBytes(destinationBuffer, _cameraOrientation);
    destinationBuffer += packFloatAngleToTwoByte(destinationBuffer, _cameraFov);
    destinationBuffer += packFloatRatioToTwoByte(destinationBuffer, _cameraAspectRatio);
    destinationBuffer += packClipValueToTwoByte(destinationBuffer, _cameraNearClip);
    destinationBuffer += packClipValueToTwoByte(destinationBuffer, _cameraFarClip);
    memcpy(destinationBuffer, &_cameraEyeOffsetPosition, sizeof(_cameraEyeOffsetPosition));
    destinationBuffer += sizeof(_cameraEyeOffsetPosition);

    // bitmask of items narrower than a byte
    unsigned char bitItems = 0;
    if (_wantLowResMoving) {
        setAtBit(bitItems, WANT_LOW_RES_MOVING_BIT);
    }
    if (_wantColor) {
        setAtBit(bitItems, WANT_COLOR_AT_BIT);
    }
    if (_wantDelta) {
        setAtBit(bitItems, WANT_DELTA_AT_BIT);
    }
    if (_wantOcclusionCulling) {
        setAtBit(bitItems, WANT_OCCLUSION_CULLING_BIT);
    }
    if (_wantCompression) {
        setAtBit(bitItems, WANT_COMPRESSION);
    }
    *destinationBuffer++ = bitItems;

    // desired max octree packets per second (PPS)
    memcpy(destinationBuffer, &_maxOctreePPS, sizeof(_maxOctreePPS));
    destinationBuffer += sizeof(_maxOctreePPS);

    // desired voxelSizeScale
    memcpy(destinationBuffer, &_octreeElementSizeScale, sizeof(_octreeElementSizeScale));
    destinationBuffer += sizeof(_octreeElementSizeScale);

    // desired boundaryLevelAdjust
    memcpy(destinationBuffer, &_boundaryLevelAdjust, sizeof(_boundaryLevelAdjust));
    destinationBuffer += sizeof(_boundaryLevelAdjust);

    return destinationBuffer - bufferStart;
}
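// A minimal sketch of how a server could unpack the bitItems byte written above back into the five want-flags.
// oneAtBitSketch() assumes setAtBit() indexes bits from the most-significant end (bit index 0 == 0x80); if the real
// helper counts from the least-significant bit instead, flip the shift. The WANT_* constants are the same bit
// positions used by the sender.
inline bool oneAtBitSketch(unsigned char byteToCheck, int bitIndex) {
    return (byteToCheck >> (7 - bitIndex)) & 1; // assumed MSB-first ordering, matching setAtBit()
}

// Usage against the byte produced by getBroadcastData():
//     bool wantLowResMoving     = oneAtBitSketch(bitItems, WANT_LOW_RES_MOVING_BIT);
//     bool wantColor            = oneAtBitSketch(bitItems, WANT_COLOR_AT_BIT);
//     bool wantDelta            = oneAtBitSketch(bitItems, WANT_DELTA_AT_BIT);
//     bool wantOcclusionCulling = oneAtBitSketch(bitItems, WANT_OCCLUSION_CULLING_BIT);
//     bool wantCompression      = oneAtBitSketch(bitItems, WANT_COMPRESSION);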
void OctreeElement::printDebugDetails(const char* label) const {
    unsigned char childBits = 0;
    for (int i = 0; i < NUMBER_OF_CHILDREN; i++) {
        OctreeElement* childAt = getChildAtIndex(i);
        if (childAt) {
            setAtBit(childBits, i);
        }
    }

    QDebug elementDebug = qDebug().nospace();

    QString resultString;
    resultString.sprintf("%s - Voxel at corner=(%f,%f,%f) size=%f\n isLeaf=%s isDirty=%s shouldRender=%s\n children=",
        label,
        (double)_cube.getCorner().x, (double)_cube.getCorner().y, (double)_cube.getCorner().z,
        (double)_cube.getScale(),
        debug::valueOf(isLeaf()), debug::valueOf(isDirty()), debug::valueOf(getShouldRender()));
    elementDebug << resultString;

    outputBits(childBits, &elementDebug);
    qDebug("octalCode=");
    printOctalCode(getOctalCode());
}
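// outputBits() is assumed to append a human-readable rendering of the childBits byte to the debug stream. A minimal
// sketch of that idea; the name, signature, and exact format here are assumptions, not the real helper.
#include <QDebug>

inline void outputBitsSketch(unsigned char byte, QDebug* debugStream) {
    char bits[9];
    for (int i = 0; i < 8; i++) {
        bits[i] = (byte & (1 << (7 - i))) ? '1' : '0'; // most-significant bit first
    }
    bits[8] = '\0';
    *debugStream << "[ " << int(byte) << " (" << bits << ") ]";
}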
void OctreeElement::setChildAtIndex(int childIndex, OctreeElement* child) {
#ifdef SIMPLE_CHILD_ARRAY
    int previousChildCount = getChildCount();
    if (child) {
        setAtBit(_childBitmask, childIndex);
    } else {
        clearAtBit(_childBitmask, childIndex);
    }
    int newChildCount = getChildCount();

    // store the child in our child array
    _simpleChildArray[childIndex] = child;

    // track our population data
    if (previousChildCount != newChildCount) {
        _childrenCount[previousChildCount]--;
        _childrenCount[newChildCount]++;
    }
#endif

#ifdef SIMPLE_EXTERNAL_CHILDREN
    int firstIndex = getNthBit(_childBitmask, 1);
    int secondIndex = getNthBit(_childBitmask, 2);

    int previousChildCount = getChildCount();
    if (child) {
        setAtBit(_childBitmask, childIndex);
    } else {
        clearAtBit(_childBitmask, childIndex);
    }
    int newChildCount = getChildCount();

    // track our population data
    if (previousChildCount != newChildCount) {
        _childrenCount[previousChildCount]--;
        _childrenCount[newChildCount]++;
    }

    if ((previousChildCount == 0 || previousChildCount == 1) && newChildCount == 0) {
        _children.single = NULL;
    } else if (previousChildCount == 0 && newChildCount == 1) {
        _children.single = child;
    } else if (previousChildCount == 1 && newChildCount == 2) {
        OctreeElement* previousChild = _children.single;
        _children.external = new OctreeElement*[NUMBER_OF_CHILDREN];
        memset(_children.external, 0, sizeof(OctreeElement*) * NUMBER_OF_CHILDREN);
        _children.external[firstIndex] = previousChild;
        _children.external[childIndex] = child;
        _childrenExternal = true;
        _externalChildrenMemoryUsage += NUMBER_OF_CHILDREN * sizeof(OctreeElement*);
    } else if (previousChildCount == 2 && newChildCount == 1) {
        assert(!child); // we are removing a child, so this must be true!
        OctreeElement* previousFirstChild = _children.external[firstIndex];
        OctreeElement* previousSecondChild = _children.external[secondIndex];
        delete[] _children.external;
        _childrenExternal = false;
        _externalChildrenMemoryUsage -= NUMBER_OF_CHILDREN * sizeof(OctreeElement*);
        if (childIndex == firstIndex) {
            _children.single = previousSecondChild;
        } else {
            _children.single = previousFirstChild;
        }
    } else {
        _children.external[childIndex] = child;
    }
#endif // def SIMPLE_EXTERNAL_CHILDREN
}
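// getNthBit() is used above to capture which child slots are occupied before the bitmask changes: firstIndex and
// secondIndex are the positions of the 1st and 2nd set bits in _childBitmask, which lets the single/external storage
// transitions move the surviving child to the right slot. A minimal sketch of that behavior, assuming bits are
// counted from the most-significant end (the same ordering assumed for setAtBit) and that a miss returns -1; the
// real helper's ordering and error value may differ.
inline int getNthBitSketch(unsigned char byte, int ordinal) {
    int found = 0;
    for (int bitIndex = 0; bitIndex < 8; bitIndex++) {
        if ((byte >> (7 - bitIndex)) & 1) {
            found++;
            if (found == ordinal) {
                return bitIndex; // index usable with setAtBit()/clearAtBit()
            }
        }
    }
    return -1; // fewer than `ordinal` bits are set
}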
void AudioMixer::sendAudioEnvironmentPacket(SharedNodePointer node) {
    // Send stream properties
    bool hasReverb = false;
    float reverbTime, wetLevel;

    // find reverb properties
    for (int i = 0; i < _zoneReverbSettings.size(); ++i) {
        AudioMixerClientData* data = static_cast<AudioMixerClientData*>(node->getLinkedData());
        glm::vec3 streamPosition = data->getAvatarAudioStream()->getPosition();
        AABox box = _audioZones[_zoneReverbSettings[i].zone];
        if (box.contains(streamPosition)) {
            hasReverb = true;
            reverbTime = _zoneReverbSettings[i].reverbTime;
            wetLevel = _zoneReverbSettings[i].wetLevel;

            // Modulate wet level with distance to wall
            float MIN_ATTENUATION_DISTANCE = 2.0f;
            float MAX_ATTENUATION = -12; // dB
            glm::vec3 distanceToWalls = (box.getDimensions() / 2.0f) - glm::abs(streamPosition - box.calcCenter());
            float distanceToClosestWall = glm::min(distanceToWalls.x, distanceToWalls.z);

            if (distanceToClosestWall < MIN_ATTENUATION_DISTANCE) {
                wetLevel += MAX_ATTENUATION * (1.0f - distanceToClosestWall / MIN_ATTENUATION_DISTANCE);
            }
            break;
        }
    }

    AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
    AvatarAudioStream* stream = nodeData->getAvatarAudioStream();
    bool dataChanged = (stream->hasReverb() != hasReverb) ||
        (stream->hasReverb() && (stream->getRevebTime() != reverbTime || stream->getWetLevel() != wetLevel));
    if (dataChanged) {
        // Update stream
        if (hasReverb) {
            stream->setReverb(reverbTime, wetLevel);
        } else {
            stream->clearReverb();
        }
    }

    // Send at change or every so often
    float CHANCE_OF_SEND = 0.01f;
    bool sendData = dataChanged || (randFloat() < CHANCE_OF_SEND);

    if (sendData) {
        auto nodeList = DependencyManager::get<NodeList>();

        unsigned char bitset = 0;
        int packetSize = sizeof(bitset);
        if (hasReverb) {
            packetSize += sizeof(reverbTime) + sizeof(wetLevel);
        }

        auto envPacket = NLPacket::create(PacketType::AudioEnvironment, packetSize);
        if (hasReverb) {
            setAtBit(bitset, HAS_REVERB_BIT);
        }
        envPacket->writePrimitive(bitset);
        if (hasReverb) {
            envPacket->writePrimitive(reverbTime);
            envPacket->writePrimitive(wetLevel);
        }

        nodeList->sendPacket(std::move(envPacket), *node);
    }
}
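// The wet-level modulation above fades the reverb send down as the listener approaches a zone wall: at
// MIN_ATTENUATION_DISTANCE (2 m) or farther the wet level is unchanged, and the adjustment ramps linearly to the
// full MAX_ATTENUATION (-12 dB) right at the wall. A self-contained restatement of that arithmetic; for example,
// 0.5 m from the closest wall gives -12 * (1 - 0.5/2) = -9 dB of additional attenuation.
inline float attenuateWetLevelNearWall(float wetLevel, float distanceToClosestWall) {
    const float MIN_ATTENUATION_DISTANCE = 2.0f; // meters, same constant as above
    const float MAX_ATTENUATION = -12.0f;        // dB, applied fully at the wall itself
    if (distanceToClosestWall < MIN_ATTENUATION_DISTANCE) {
        wetLevel += MAX_ATTENUATION * (1.0f - distanceToClosestWall / MIN_ATTENUATION_DISTANCE);
    }
    return wetLevel;
}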