// called on the other nodes - assigns it to my views of the others
int OctreeQuery::parseData(ReceivedMessage& message) {

    const unsigned char* startPosition = reinterpret_cast<const unsigned char*>(message.getRawMessage());
    const unsigned char* sourceBuffer = startPosition;

    // check if this query uses a view frustum
    memcpy(&_usesFrustum, sourceBuffer, sizeof(_usesFrustum));
    sourceBuffer += sizeof(_usesFrustum);

    if (_usesFrustum) {
        // unpack camera details
        memcpy(&_cameraPosition, sourceBuffer, sizeof(_cameraPosition));
        sourceBuffer += sizeof(_cameraPosition);

        sourceBuffer += unpackOrientationQuatFromBytes(sourceBuffer, _cameraOrientation);
        sourceBuffer += unpackFloatAngleFromTwoByte((uint16_t*) sourceBuffer, &_cameraFov);
        sourceBuffer += unpackFloatRatioFromTwoByte(sourceBuffer, _cameraAspectRatio);
        sourceBuffer += unpackClipValueFromTwoByte(sourceBuffer, _cameraNearClip);
        sourceBuffer += unpackClipValueFromTwoByte(sourceBuffer, _cameraFarClip);

        memcpy(&_cameraEyeOffsetPosition, sourceBuffer, sizeof(_cameraEyeOffsetPosition));
        sourceBuffer += sizeof(_cameraEyeOffsetPosition);
    }

    // desired Max Octree PPS
    memcpy(&_maxQueryPPS, sourceBuffer, sizeof(_maxQueryPPS));
    sourceBuffer += sizeof(_maxQueryPPS);

    // desired _octreeElementSizeScale
    memcpy(&_octreeElementSizeScale, sourceBuffer, sizeof(_octreeElementSizeScale));
    sourceBuffer += sizeof(_octreeElementSizeScale);

    // desired boundaryLevelAdjust
    memcpy(&_boundaryLevelAdjust, sourceBuffer, sizeof(_boundaryLevelAdjust));
    sourceBuffer += sizeof(_boundaryLevelAdjust);

    memcpy(&_cameraCenterRadius, sourceBuffer, sizeof(_cameraCenterRadius));
    sourceBuffer += sizeof(_cameraCenterRadius);

    // check if we have a packed JSON filter
    uint16_t binaryParametersBytes;
    memcpy(&binaryParametersBytes, sourceBuffer, sizeof(binaryParametersBytes));
    sourceBuffer += sizeof(binaryParametersBytes);

    if (binaryParametersBytes > 0) {
        // unpack the binary JSON parameters
        QByteArray binaryJSONParameters { binaryParametersBytes, 0 };
        memcpy(binaryJSONParameters.data(), sourceBuffer, binaryParametersBytes);
        sourceBuffer += binaryParametersBytes;

        // grab the parameter object from the packed binary representation of JSON
        auto newJsonDocument = QJsonDocument::fromBinaryData(binaryJSONParameters);

        QWriteLocker jsonParameterLocker { &_jsonParametersLock };
        _jsonParameters = newJsonDocument.object();
    }

    return sourceBuffer - startPosition;
}
// called on the other nodes - assigns it to my views of the others
int OctreeQuery::parseData(ReceivedMessage& message) {

    const unsigned char* startPosition = reinterpret_cast<const unsigned char*>(message.getRawMessage());
    const unsigned char* sourceBuffer = startPosition;

    // camera details
    memcpy(&_cameraPosition, sourceBuffer, sizeof(_cameraPosition));
    sourceBuffer += sizeof(_cameraPosition);

    sourceBuffer += unpackOrientationQuatFromBytes(sourceBuffer, _cameraOrientation);
    sourceBuffer += unpackFloatAngleFromTwoByte((uint16_t*) sourceBuffer, &_cameraFov);
    sourceBuffer += unpackFloatRatioFromTwoByte(sourceBuffer, _cameraAspectRatio);
    sourceBuffer += unpackClipValueFromTwoByte(sourceBuffer, _cameraNearClip);
    sourceBuffer += unpackClipValueFromTwoByte(sourceBuffer, _cameraFarClip);

    memcpy(&_cameraEyeOffsetPosition, sourceBuffer, sizeof(_cameraEyeOffsetPosition));
    sourceBuffer += sizeof(_cameraEyeOffsetPosition);

    // optional feature flags
    unsigned char bitItems = 0;
    bitItems = (unsigned char)*sourceBuffer++;

    // NOTE: we used to use these bits to set feature request items; if we need to extend
    // the protocol with optional features, do it here with...
    //     wantFeature = oneAtBit(bitItems, WANT_FEATURE_BIT);
    Q_UNUSED(bitItems);

    // desired Max Octree PPS
    memcpy(&_maxQueryPPS, sourceBuffer, sizeof(_maxQueryPPS));
    sourceBuffer += sizeof(_maxQueryPPS);

    // desired _octreeElementSizeScale
    memcpy(&_octreeElementSizeScale, sourceBuffer, sizeof(_octreeElementSizeScale));
    sourceBuffer += sizeof(_octreeElementSizeScale);

    // desired boundaryLevelAdjust
    memcpy(&_boundaryLevelAdjust, sourceBuffer, sizeof(_boundaryLevelAdjust));
    sourceBuffer += sizeof(_boundaryLevelAdjust);

    // the camera center radius is optional trailing data - only read it if it is present
    auto bytesRead = sourceBuffer - startPosition;
    auto bytesLeft = message.getSize() - bytesRead;
    if (bytesLeft >= (int)sizeof(_cameraCenterRadius)) {
        memcpy(&_cameraCenterRadius, sourceBuffer, sizeof(_cameraCenterRadius));
        sourceBuffer += sizeof(_cameraCenterRadius);
    }

    return sourceBuffer - startPosition;
}
// called on the other nodes - assigns it to my views of the others
int OctreeQuery::parseData(const QByteArray& packet) {

    // increment to push past the packet header
    int numBytesPacketHeader = numBytesForPacketHeader(packet);

    const unsigned char* startPosition = reinterpret_cast<const unsigned char*>(packet.data());
    const unsigned char* sourceBuffer = startPosition + numBytesPacketHeader;

    // camera details
    memcpy(&_cameraPosition, sourceBuffer, sizeof(_cameraPosition));
    sourceBuffer += sizeof(_cameraPosition);

    sourceBuffer += unpackOrientationQuatFromBytes(sourceBuffer, _cameraOrientation);
    sourceBuffer += unpackFloatAngleFromTwoByte((uint16_t*) sourceBuffer, &_cameraFov);
    sourceBuffer += unpackFloatRatioFromTwoByte(sourceBuffer, _cameraAspectRatio);
    sourceBuffer += unpackClipValueFromTwoByte(sourceBuffer, _cameraNearClip);
    sourceBuffer += unpackClipValueFromTwoByte(sourceBuffer, _cameraFarClip);

    memcpy(&_cameraEyeOffsetPosition, sourceBuffer, sizeof(_cameraEyeOffsetPosition));
    sourceBuffer += sizeof(_cameraEyeOffsetPosition);

    // voxel sending features...
    unsigned char bitItems = 0;
    bitItems = (unsigned char)*sourceBuffer++;
    _wantLowResMoving = oneAtBit(bitItems, WANT_LOW_RES_MOVING_BIT);
    _wantColor = oneAtBit(bitItems, WANT_COLOR_AT_BIT);
    _wantDelta = oneAtBit(bitItems, WANT_DELTA_AT_BIT);
    _wantOcclusionCulling = oneAtBit(bitItems, WANT_OCCLUSION_CULLING_BIT);
    _wantCompression = oneAtBit(bitItems, WANT_COMPRESSION);

    // desired Max Octree PPS
    memcpy(&_maxOctreePPS, sourceBuffer, sizeof(_maxOctreePPS));
    sourceBuffer += sizeof(_maxOctreePPS);

    // desired _octreeElementSizeScale
    memcpy(&_octreeElementSizeScale, sourceBuffer, sizeof(_octreeElementSizeScale));
    sourceBuffer += sizeof(_octreeElementSizeScale);

    // desired boundaryLevelAdjust
    memcpy(&_boundaryLevelAdjust, sourceBuffer, sizeof(_boundaryLevelAdjust));
    sourceBuffer += sizeof(_boundaryLevelAdjust);

    return sourceBuffer - startPosition;
}
int ConicalViewFrustum::deserialize(const unsigned char* sourceBuffer) {
    const unsigned char* startPosition = sourceBuffer;

    memcpy(&_position, sourceBuffer, sizeof(_position));
    sourceBuffer += sizeof(_position);
    memcpy(&_direction, sourceBuffer, sizeof(_direction));
    sourceBuffer += sizeof(_direction);
    sourceBuffer += unpackFloatAngleFromTwoByte((uint16_t*)sourceBuffer, &_angle);
    sourceBuffer += unpackClipValueFromTwoByte(sourceBuffer, _farClip);
    memcpy(&_radius, sourceBuffer, sizeof(_radius));
    sourceBuffer += sizeof(_radius);

    calculate();

    return sourceBuffer - startPosition;
}
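// For reference, the sending side would need to write the same fields in the same order
// so that deserialize() above can read them back. The sketch below is a hedged
// illustration, not copied from the original source: it assumes that pack helpers
// mirroring the unpack helpers used above (packFloatAngleToTwoByte, packClipValueToTwoByte)
// are available.
int ConicalViewFrustum::serialize(unsigned char* destinationBuffer) const {
    const unsigned char* startPosition = destinationBuffer;

    // position and direction are copied raw, matching the memcpy reads in deserialize()
    memcpy(destinationBuffer, &_position, sizeof(_position));
    destinationBuffer += sizeof(_position);
    memcpy(destinationBuffer, &_direction, sizeof(_direction));
    destinationBuffer += sizeof(_direction);

    // angle and far clip are quantized to two bytes, matching the two-byte unpack calls
    destinationBuffer += packFloatAngleToTwoByte(destinationBuffer, _angle);
    destinationBuffer += packClipValueToTwoByte(destinationBuffer, _farClip);

    memcpy(destinationBuffer, &_radius, sizeof(_radius));
    destinationBuffer += sizeof(_radius);

    return destinationBuffer - startPosition;
}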
// called on the other nodes - assigns it to my views of the others
int OctreeQuery::parseData(ReceivedMessage& message) {

    const unsigned char* startPosition = reinterpret_cast<const unsigned char*>(message.getRawMessage());
    const unsigned char* sourceBuffer = startPosition;

    // unpack the connection ID
    uint16_t newConnectionID;
    memcpy(&newConnectionID, sourceBuffer, sizeof(newConnectionID));
    sourceBuffer += sizeof(newConnectionID);

    if (!_hasReceivedFirstQuery) {
        // set our flag to indicate that we've parsed for this query at least once
        _hasReceivedFirstQuery = true;

        // set the incoming connection ID as the current
        _connectionID = newConnectionID;
    } else {
        if (newConnectionID != _connectionID) {
            // the connection ID has changed - emit our signal so the server
            // knows that the client is starting a new session
            _connectionID = newConnectionID;
            emit incomingConnectionIDChanged();
        }
    }

    // check if this query uses a view frustum
    memcpy(&_usesFrustum, sourceBuffer, sizeof(_usesFrustum));
    sourceBuffer += sizeof(_usesFrustum);

    if (_usesFrustum) {
        // unpack camera details
        memcpy(&_cameraPosition, sourceBuffer, sizeof(_cameraPosition));
        sourceBuffer += sizeof(_cameraPosition);

        sourceBuffer += unpackOrientationQuatFromBytes(sourceBuffer, _cameraOrientation);
        sourceBuffer += unpackFloatAngleFromTwoByte((uint16_t*) sourceBuffer, &_cameraFov);
        sourceBuffer += unpackFloatRatioFromTwoByte(sourceBuffer, _cameraAspectRatio);
        sourceBuffer += unpackClipValueFromTwoByte(sourceBuffer, _cameraNearClip);
        sourceBuffer += unpackClipValueFromTwoByte(sourceBuffer, _cameraFarClip);

        memcpy(&_cameraEyeOffsetPosition, sourceBuffer, sizeof(_cameraEyeOffsetPosition));
        sourceBuffer += sizeof(_cameraEyeOffsetPosition);
    }

    // desired Max Octree PPS
    memcpy(&_maxQueryPPS, sourceBuffer, sizeof(_maxQueryPPS));
    sourceBuffer += sizeof(_maxQueryPPS);

    // desired _octreeElementSizeScale
    memcpy(&_octreeElementSizeScale, sourceBuffer, sizeof(_octreeElementSizeScale));
    sourceBuffer += sizeof(_octreeElementSizeScale);

    // desired boundaryLevelAdjust
    memcpy(&_boundaryLevelAdjust, sourceBuffer, sizeof(_boundaryLevelAdjust));
    sourceBuffer += sizeof(_boundaryLevelAdjust);

    memcpy(&_cameraCenterRadius, sourceBuffer, sizeof(_cameraCenterRadius));
    sourceBuffer += sizeof(_cameraCenterRadius);

    // check if we have a packed JSON filter
    uint16_t binaryParametersBytes;
    memcpy(&binaryParametersBytes, sourceBuffer, sizeof(binaryParametersBytes));
    sourceBuffer += sizeof(binaryParametersBytes);

    if (binaryParametersBytes > 0) {
        // unpack the binary JSON parameters
        QByteArray binaryJSONParameters { binaryParametersBytes, 0 };
        memcpy(binaryJSONParameters.data(), sourceBuffer, binaryParametersBytes);
        sourceBuffer += binaryParametersBytes;

        // grab the parameter object from the packed binary representation of JSON
        auto newJsonDocument = QJsonDocument::fromBinaryData(binaryJSONParameters);

        QWriteLocker jsonParameterLocker { &_jsonParametersLock };
        _jsonParameters = newJsonDocument.object();
    }

    return sourceBuffer - startPosition;
}
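// For context, the JSON filter that the two later parseData() variants unpack is a
// length-prefixed blob produced with QJsonDocument::fromBinaryData()'s counterpart,
// QJsonDocument::toBinaryData(), on the sending side. The helper below is a hedged,
// hypothetical sketch of that packing step (the function name packJsonParameters is
// not from the original source); it only illustrates the wire layout the parser expects:
// a uint16_t byte count followed by the binary JSON bytes.
int packJsonParameters(unsigned char* destinationBuffer, const QJsonObject& jsonParameters) {
    unsigned char* startPosition = destinationBuffer;

    // convert the parameter object to Qt's binary JSON representation
    QJsonDocument jsonDocument { jsonParameters };
    QByteArray binaryParametersDocument = jsonDocument.toBinaryData();

    // write the 16-bit size prefix, then the binary JSON bytes
    uint16_t binaryParametersBytes = binaryParametersDocument.size();
    memcpy(destinationBuffer, &binaryParametersBytes, sizeof(binaryParametersBytes));
    destinationBuffer += sizeof(binaryParametersBytes);

    memcpy(destinationBuffer, binaryParametersDocument.data(), binaryParametersBytes);
    destinationBuffer += binaryParametersBytes;

    return destinationBuffer - startPosition;
}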