float ConicalViewFrustum::getAngularSize(const AABox& box) const {
    auto radius = 0.5f * glm::length(box.getScale()); // radius of bounding sphere
    auto position = box.calcCenter() - _position; // position of bounding sphere in view-frame
    float distance = glm::length(position);

    return getAngularSize(distance, radius);
}
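The two-argument getAngularSize(distance, radius) overload is not part of this snippet. A minimal sketch of what it might look like, assuming a small-angle radius/distance metric (the helper name and the guard constant are hypothetical):

// Sketch only, not the actual implementation. Under the small-angle approximation, the
// apparent size of a sphere of radius r at distance d is ~ 2 * atan(r / d) ~= 2 * r / d,
// so r / d is a usable monotonic proxy when the result is only compared against thresholds.
float getAngularSizeSketch(float distance, float radius) {
    const float MIN_DISTANCE = 0.001f; // hypothetical guard against division by zero
    return radius / (distance > MIN_DISTANCE ? distance : MIN_DISTANCE);
}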
// Similar strategy to getProjectedPolygon(): we use the knowledge of the camera position relative to the
// axis-aligned voxels to determine which of the voxel's vertices must be the furthest. No need for
// squares and square roots, just comparisons.
glm::vec3 ViewFrustum::getFurthestPointFromCamera(const AABox& box) const {
    const glm::vec3& bottomNearRight = box.getCorner();
    glm::vec3 center = box.calcCenter();
    glm::vec3 topFarLeft = box.calcTopFarLeft();

    glm::vec3 furthestPoint;
    if (_position.x < center.x) {
        // we are to the right of the center, so the left edge is furthest
        furthestPoint.x = topFarLeft.x;
    } else {
        // we are to the left of the center, so the right edge is furthest (at center is ok too)
        furthestPoint.x = bottomNearRight.x;
    }

    if (_position.y < center.y) {
        // we are below the center, so the top edge is furthest
        furthestPoint.y = topFarLeft.y;
    } else {
        // we are above the center, so the lower edge is furthest (at center is ok too)
        furthestPoint.y = bottomNearRight.y;
    }

    if (_position.z < center.z) {
        // we are on the near side of the center, so the far edge is furthest
        furthestPoint.z = topFarLeft.z;
    } else {
        // we are on the far side of the center, so the near edge is furthest (at center is ok too)
        furthestPoint.z = bottomNearRight.z;
    }

    return furthestPoint;
}
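A hypothetical usage sketch (not from the source): because the furthest corner is picked by sign comparisons alone, a conservative far distance for a box needs only a single square root.

// Hypothetical helper: distance to the single furthest corner, e.g. for a conservative
// depth-range or LOD decision, instead of measuring all eight corners.
float furthestDistanceSketch(const ViewFrustum& frustum, const AABox& box) {
    glm::vec3 furthest = frustum.getFurthestPointFromCamera(box);
    return glm::distance(frustum.getPosition(), furthest); // one sqrt instead of eight
}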
float calculateRenderAccuracy(const glm::vec3& position, const AABox& bounds, float octreeSizeScale, int boundaryLevelAdjust) {
    float largestDimension = bounds.getLargestDimension();

    const float maxScale = (float)TREE_SCALE;
    float visibleDistanceAtMaxScale = boundaryDistanceForRenderLevel(boundaryLevelAdjust, octreeSizeScale) / OCTREE_TO_MESH_RATIO;

    static std::once_flag once;
    static QMap<float, float> shouldRenderTable;
    std::call_once(once, [&] {
        float SMALLEST_SCALE_IN_TABLE = 0.001f; // 1mm is plenty small
        float scale = maxScale;
        float factor = 1.0f;

        while (scale > SMALLEST_SCALE_IN_TABLE) {
            scale /= 2.0f;
            factor /= 2.0f;
            shouldRenderTable[scale] = factor;
        }
    });

    float closestScale = maxScale;
    float visibleDistanceAtClosestScale = visibleDistanceAtMaxScale;
    QMap<float, float>::const_iterator lowerBound = shouldRenderTable.lowerBound(largestDimension);
    if (lowerBound != shouldRenderTable.constEnd()) {
        closestScale = lowerBound.key();
        visibleDistanceAtClosestScale = visibleDistanceAtMaxScale * lowerBound.value();
    }

    if (closestScale < largestDimension) {
        visibleDistanceAtClosestScale *= 2.0f;
    }

    // FIXME - for now, it's either visible or not visible. We want to adjust this to eventually return
    // a floating point for objects that have a small angular size, to indicate that they may be rendered
    // with lower precision
    float distanceToCamera = glm::length(bounds.calcCenter() - position);
    return (distanceToCamera <= visibleDistanceAtClosestScale) ? 1.0f : 0.0f;
}
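boundaryDistanceForRenderLevel() is not shown in this snippet. A minimal sketch of the shape it is assumed to have, where the visible boundary distance halves for each additional render level (the signature and formula are assumptions, not copied from the source):

// Sketch only (assumed): each render level halves the visible-boundary distance,
// scaled by the overall octree size scale. Requires <cmath> for powf.
float boundaryDistanceForRenderLevelSketch(int renderLevel, float voxelSizeScale) {
    return voxelSizeScale / powf(2.0f, (float)renderLevel);
}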
bool LODManager::shouldRender(const RenderArgs* args, const AABox& bounds) {
    const float maxScale = (float)TREE_SCALE;
    const float octreeToMeshRatio = 4.0f; // must be this many times closer to a mesh than a voxel to see it.

    float octreeSizeScale = args->_sizeScale;
    int boundaryLevelAdjust = args->_boundaryLevelAdjust;
    float visibleDistanceAtMaxScale = boundaryDistanceForRenderLevel(boundaryLevelAdjust, octreeSizeScale) / octreeToMeshRatio;
    float distanceToCamera = glm::length(bounds.calcCenter() - args->_viewFrustum->getPosition());
    float largestDimension = bounds.getLargestDimension();

    static bool shouldRenderTableNeedsBuilding = true;
    static QMap<float, float> shouldRenderTable;
    if (shouldRenderTableNeedsBuilding) {
        float SMALLEST_SCALE_IN_TABLE = 0.001f; // 1mm is plenty small
        float scale = maxScale;
        float factor = 1.0f;

        while (scale > SMALLEST_SCALE_IN_TABLE) {
            scale /= 2.0f;
            factor /= 2.0f;
            shouldRenderTable[scale] = factor;
        }

        shouldRenderTableNeedsBuilding = false;
    }

    float closestScale = maxScale;
    float visibleDistanceAtClosestScale = visibleDistanceAtMaxScale;
    QMap<float, float>::const_iterator lowerBound = shouldRenderTable.lowerBound(largestDimension);
    if (lowerBound != shouldRenderTable.constEnd()) {
        closestScale = lowerBound.key();
        visibleDistanceAtClosestScale = visibleDistanceAtMaxScale * lowerBound.value();
    }

    if (closestScale < largestDimension) {
        visibleDistanceAtClosestScale *= 2.0f;
    }

    return distanceToCamera <= visibleDistanceAtClosestScale;
}
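A hypothetical caller sketch, assuming shouldRender() is reachable through an LODManager reference as shown; it only illustrates the cull-before-batching pattern, not the actual call site.

// Hypothetical helper (names assumed): skip items whose bounds fall below the
// visibility threshold for the current LOD settings before recording any draw work.
bool prepareItemForRenderSketch(LODManager& lodManager, RenderArgs* args, const AABox& itemBounds) {
    if (!lodManager.shouldRender(args, itemBounds)) {
        return false; // too small / too far for the current LOD settings
    }
    // ... record draw commands for the item ...
    return true;
}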
void ModelMeshPartPayload::render(RenderArgs* args) const {
    PerformanceTimer perfTimer("ModelMeshPartPayload::render");

    if (!_model->_readyWhenAdded || !_model->_isVisible) {
        return; // bail asap
    }

    gpu::Batch& batch = *(args->_batch);

    if (!getShapeKey().isValid()) {
        return;
    }

    // render the part bounding box
#ifdef DEBUG_BOUNDING_PARTS
    {
        AABox partBounds = getPartBounds(_meshIndex, partIndex);

        glm::vec4 cubeColor(1.0f, 1.0f, 0.0f, 1.0f);
        if (isSkinned) {
            cubeColor = glm::vec4(0.0f, 1.0f, 1.0f, 1.0f);
        } else if (args->_viewFrustum->boxIntersectsFrustum(partBounds)) {
            cubeColor = glm::vec4(1.0f, 0.0f, 1.0f, 1.0f);
        }

        Transform transform;
        transform.setTranslation(partBounds.calcCenter());
        transform.setScale(partBounds.getDimensions());
        batch.setModelTransform(transform);
        DependencyManager::get<GeometryCache>()->renderWireCube(batch, 1.0f, cubeColor);
    }
#endif //def DEBUG_BOUNDING_PARTS

    auto locations = args->_pipeline->locations;
    assert(locations);

    // Bind the model transform and the skinClusterMatrices if needed
    bool canCauterize = args->_renderMode != RenderArgs::SHADOW_RENDER_MODE;
    _model->updateClusterMatrices(_transform.getTranslation(), _transform.getRotation());
    bindTransform(batch, locations, canCauterize);

    // Bind the index buffer, vertex buffer, and blend shapes if needed
    bindMesh(batch);

    // apply material properties
    bindMaterial(batch, locations);

    if (args) {
        args->_details._materialSwitches++;
    }

    // Draw!
    {
        PerformanceTimer perfTimer("batch.drawIndexed()");
        drawCall(batch);
    }

    if (args) {
        const int INDICES_PER_TRIANGLE = 3;
        args->_details._trianglesRendered += _drawPart._numIndices / INDICES_PER_TRIANGLE;
    }
}
void ModelMeshPartPayload::render(RenderArgs* args) const {
    PerformanceTimer perfTimer("ModelMeshPartPayload::render");

    if (!_model->_readyWhenAdded || !_model->_isVisible) {
        return; // bail asap
    }

    gpu::Batch& batch = *(args->_batch);

    ShapeKey key = getShapeKey();
    if (!key.isValid()) {
        return;
    }

    // render the part bounding box
#ifdef DEBUG_BOUNDING_PARTS
    {
        AABox partBounds = getPartBounds(_meshIndex, partIndex);
        bool inView = args->_viewFrustum->boxInFrustum(partBounds) != ViewFrustum::OUTSIDE;

        glm::vec4 cubeColor;
        if (isSkinned) {
            cubeColor = glm::vec4(0.0f, 1.0f, 1.0f, 1.0f);
        } else if (inView) {
            cubeColor = glm::vec4(1.0f, 0.0f, 1.0f, 1.0f);
        } else {
            cubeColor = glm::vec4(1.0f, 1.0f, 0.0f, 1.0f);
        }

        Transform transform;
        transform.setTranslation(partBounds.calcCenter());
        transform.setScale(partBounds.getDimensions());
        batch.setModelTransform(transform);
        DependencyManager::get<GeometryCache>()->renderWireCube(batch, 1.0f, cubeColor);
    }
#endif //def DEBUG_BOUNDING_PARTS

    auto locations = args->_pipeline->locations;
    assert(locations);

    // Bind the model transform and the skinClusterMatrices if needed
    _model->updateClusterMatrices(_transform.getTranslation(), _transform.getRotation());
    bindTransform(batch, locations);

    // Bind the index buffer, vertex buffer, and blend shapes if needed
    bindMesh(batch);

    // apply material properties
    bindMaterial(batch, locations);

    // TODO: We should be able to do that just in the renderTransparentJob
    if (key.isTranslucent() && locations->lightBufferUnit >= 0) {
        PerformanceTimer perfTimer("DLE->setupTransparent()");
        DependencyManager::get<DeferredLightingEffect>()->setupTransparent(args, locations->lightBufferUnit);
    }

    if (args) {
        args->_details._materialSwitches++;
    }

    // Draw!
    {
        PerformanceTimer perfTimer("batch.drawIndexed()");
        drawCall(batch);
    }

    if (args) {
        const int INDICES_PER_TRIANGLE = 3;
        args->_details._trianglesRendered += _drawPart._numIndices / INDICES_PER_TRIANGLE;
    }
}
OctreeProjectedPolygon ViewFrustum::getProjectedPolygon(const AABox& box) const {
    const glm::vec3& bottomNearRight = box.getCorner();
    glm::vec3 topFarLeft = box.calcTopFarLeft();

    int lookUp = ((_position.x < bottomNearRight.x))          //  1 = right      | compute 6-bit
               + ((_position.x > topFarLeft.x) << 1)          //  2 = left       | code to
               + ((_position.y < bottomNearRight.y) << 2)     //  4 = bottom     | classify camera
               + ((_position.y > topFarLeft.y) << 3)          //  8 = top        | with respect to
               + ((_position.z < bottomNearRight.z) << 4)     // 16 = front/near | the 6 defining
               + ((_position.z > topFarLeft.z) << 5);         // 32 = back/far   | planes

    int vertexCount = hullVertexLookup[lookUp][0]; // look up the number of vertices

    OctreeProjectedPolygon projectedPolygon(vertexCount);

    bool pointInView = true;
    bool allPointsInView = false; // assume the best, but wait till we know we have a vertex
    bool anyPointsInView = false; // assume the worst!
    if (vertexCount) {
        allPointsInView = true; // assume the best!
        for (int i = 0; i < vertexCount; i++) {
            int vertexNum = hullVertexLookup[lookUp][i + 1];
            glm::vec3 point = box.getVertex((BoxVertex)vertexNum);
            glm::vec2 projectedPoint = projectPoint(point, pointInView);
            allPointsInView = allPointsInView && pointInView;
            anyPointsInView = anyPointsInView || pointInView;
            projectedPolygon.setVertex(i, projectedPoint);
        }

        /***
        // Now that we've got the polygon, if it extends beyond the clipping window, then let's clip it
        // NOTE: This clipping does not improve our overall performance. It basically causes more polygons to
        // end up in the same quad/half and so the polygon lists get longer, and that's more calls to polygon.occludes()
        if ((projectedPolygon.getMaxX() > PolygonClip::RIGHT_OF_CLIPPING_WINDOW) ||
            (projectedPolygon.getMaxY() > PolygonClip::TOP_OF_CLIPPING_WINDOW) ||
            (projectedPolygon.getMaxX() < PolygonClip::LEFT_OF_CLIPPING_WINDOW) ||
            (projectedPolygon.getMaxY() < PolygonClip::BOTTOM_OF_CLIPPING_WINDOW)) {

            CoverageRegion::_clippedPolygons++;

            glm::vec2* clippedVertices;
            int clippedVertexCount;
            PolygonClip::clipToScreen(projectedPolygon.getVertices(), vertexCount, clippedVertices, clippedVertexCount);

            // Now reset the vertices of our projectedPolygon object
            projectedPolygon.setVertexCount(clippedVertexCount);
            for (int i = 0; i < clippedVertexCount; i++) {
                projectedPolygon.setVertex(i, clippedVertices[i]);
            }
            delete[] clippedVertices;

            lookUp += PROJECTION_CLIPPED;
        }
        ***/
    }

    // set the distance from our camera position to the center of the box
    float distance = glm::distance(getPosition(), box.calcCenter());
    projectedPolygon.setDistance(distance);
    projectedPolygon.setAnyInView(anyPointsInView);
    projectedPolygon.setAllInView(allPointsInView);
    projectedPolygon.setProjectionType(lookUp); // remember the projection type
    return projectedPolygon;
}
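An illustrative standalone version of the same 6-bit classification (the helper name and min/max parameters are hypothetical); it shows how the code indexes the silhouette table without any per-corner distance math.

// Sketch only: each bit records whether the camera lies outside one of the box's six
// face planes. The value (0..63, with some combinations geometrically impossible) is used
// to index a precomputed table of silhouette-hull vertices.
int classifyCameraAgainstBox(const glm::vec3& camera, const glm::vec3& boxMin, const glm::vec3& boxMax) {
    return ((camera.x < boxMin.x) ? 1 : 0)
         | ((camera.x > boxMax.x) ? 2 : 0)
         | ((camera.y < boxMin.y) ? 4 : 0)
         | ((camera.y > boxMax.y) ? 8 : 0)
         | ((camera.z < boxMin.z) ? 16 : 0)
         | ((camera.z > boxMax.z) ? 32 : 0);
}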
void AudioMixer::sendAudioEnvironmentPacket(SharedNodePointer node) {
    // Send stream properties
    bool hasReverb = false;
    float reverbTime, wetLevel;

    // find reverb properties
    for (int i = 0; i < _zoneReverbSettings.size(); ++i) {
        AudioMixerClientData* data = static_cast<AudioMixerClientData*>(node->getLinkedData());
        glm::vec3 streamPosition = data->getAvatarAudioStream()->getPosition();
        AABox box = _audioZones[_zoneReverbSettings[i].zone];
        if (box.contains(streamPosition)) {
            hasReverb = true;
            reverbTime = _zoneReverbSettings[i].reverbTime;
            wetLevel = _zoneReverbSettings[i].wetLevel;

            // Modulate wet level with distance to wall
            float MIN_ATTENUATION_DISTANCE = 2.0f;
            float MAX_ATTENUATION = -12; // dB
            glm::vec3 distanceToWalls = (box.getDimensions() / 2.0f) - glm::abs(streamPosition - box.calcCenter());
            float distanceToClosestWall = glm::min(distanceToWalls.x, distanceToWalls.z);

            if (distanceToClosestWall < MIN_ATTENUATION_DISTANCE) {
                wetLevel += MAX_ATTENUATION * (1.0f - distanceToClosestWall / MIN_ATTENUATION_DISTANCE);
            }
            break;
        }
    }

    AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
    AvatarAudioStream* stream = nodeData->getAvatarAudioStream();
    bool dataChanged = (stream->hasReverb() != hasReverb) ||
                       (stream->hasReverb() && (stream->getRevebTime() != reverbTime ||
                                                stream->getWetLevel() != wetLevel));
    if (dataChanged) {
        // Update stream
        if (hasReverb) {
            stream->setReverb(reverbTime, wetLevel);
        } else {
            stream->clearReverb();
        }
    }

    // Send at change or every so often
    float CHANCE_OF_SEND = 0.01f;
    bool sendData = dataChanged || (randFloat() < CHANCE_OF_SEND);

    if (sendData) {
        auto nodeList = DependencyManager::get<NodeList>();

        unsigned char bitset = 0;

        int packetSize = sizeof(bitset);
        if (hasReverb) {
            packetSize += sizeof(reverbTime) + sizeof(wetLevel);
        }

        auto envPacket = NLPacket::create(PacketType::AudioEnvironment, packetSize);

        if (hasReverb) {
            setAtBit(bitset, HAS_REVERB_BIT);
        }

        envPacket->writePrimitive(bitset);

        if (hasReverb) {
            envPacket->writePrimitive(reverbTime);
            envPacket->writePrimitive(wetLevel);
        }

        nodeList->sendPacket(std::move(envPacket), *node);
    }
}
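A worked sketch of the wall-proximity attenuation used above, extracted into a hypothetical pure helper: the wet level ramps linearly from 0 dB at 2 m from the nearest wall down to -12 dB right at the wall. For example, at 0.5 m the adjustment is -12 * (1 - 0.5/2) = -9 dB.

// Hypothetical helper (not in the source): same math as the in-loop adjustment above,
// isolated so the ramp is easy to see and test.
float wallProximityAttenuationDb(float distanceToClosestWall) {
    const float MIN_ATTENUATION_DISTANCE = 2.0f; // meters
    const float MAX_ATTENUATION = -12.0f;        // dB, applied fully at the wall
    if (distanceToClosestWall >= MIN_ATTENUATION_DISTANCE) {
        return 0.0f; // far enough from every wall: no extra attenuation
    }
    return MAX_ATTENUATION * (1.0f - distanceToClosestWall / MIN_ATTENUATION_DISTANCE);
}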
bool EntityEditFilters::filter(glm::vec3& position, EntityItemProperties& propertiesIn, EntityItemProperties& propertiesOut,
                               bool& wasChanged, EntityTree::FilterType filterType, EntityItemID& itemID,
                               EntityItemPointer& existingEntity) {

    // get the ids of all the zones (plus the global entity edit filter) that the position
    // lies within
    auto zoneIDs = getZonesByPosition(position);
    for (auto id : zoneIDs) {
        if (!itemID.isInvalidID() && id == itemID) {
            continue;
        }

        // get the filter pair, etc...
        _lock.lockForRead();
        FilterData filterData = _filterDataMap.value(id);
        _lock.unlock();

        if (filterData.valid()) {
            if (filterData.rejectAll) {
                return false;
            }

            // check to see if this filter wants to filter this message type
            if ((!filterData.wantsToFilterEdit && filterType == EntityTree::FilterType::Edit) ||
                (!filterData.wantsToFilterPhysics && filterType == EntityTree::FilterType::Physics) ||
                (!filterData.wantsToFilterDelete && filterType == EntityTree::FilterType::Delete) ||
                (!filterData.wantsToFilterAdd && filterType == EntityTree::FilterType::Add)) {

                wasChanged = false;
                return true; // accept the message
            }

            auto oldProperties = propertiesIn.getDesiredProperties();
            auto specifiedProperties = propertiesIn.getChangedProperties();
            propertiesIn.setDesiredProperties(specifiedProperties);
            QScriptValue inputValues = propertiesIn.copyToScriptValue(filterData.engine, false, true, true);
            propertiesIn.setDesiredProperties(oldProperties);

            // grab a JSON copy now, because the inputValues might be side-effected by the filter.
            auto in = QJsonValue::fromVariant(inputValues.toVariant());

            QScriptValueList args;
            args << inputValues;
            args << filterType;

            // get the current properties for the entity and include them for the filter call
            if (existingEntity && filterData.wantsOriginalProperties) {
                auto currentProperties = existingEntity->getProperties(filterData.includedOriginalProperties);
                QScriptValue currentValues = currentProperties.copyToScriptValue(filterData.engine, false, true, true);
                args << currentValues;
            }

            // get the zone properties
            if (filterData.wantsZoneProperties) {
                auto zoneEntity = _tree->findEntityByEntityItemID(id);
                if (zoneEntity) {
                    auto zoneProperties = zoneEntity->getProperties(filterData.includedZoneProperties);
                    QScriptValue zoneValues = zoneProperties.copyToScriptValue(filterData.engine, false, true, true);

                    if (filterData.wantsZoneBoundingBox) {
                        bool success = true;
                        AABox aaBox = zoneEntity->getAABox(success);
                        if (success) {
                            QScriptValue boundingBox = filterData.engine->newObject();
                            QScriptValue bottomRightNear = vec3ToScriptValue(filterData.engine, aaBox.getCorner());
                            QScriptValue topFarLeft = vec3ToScriptValue(filterData.engine, aaBox.calcTopFarLeft());
                            QScriptValue center = vec3ToScriptValue(filterData.engine, aaBox.calcCenter());
                            QScriptValue boundingBoxDimensions = vec3ToScriptValue(filterData.engine, aaBox.getDimensions());
                            boundingBox.setProperty("brn", bottomRightNear);
                            boundingBox.setProperty("tfl", topFarLeft);
                            boundingBox.setProperty("center", center);
                            boundingBox.setProperty("dimensions", boundingBoxDimensions);
                            zoneValues.setProperty("boundingBox", boundingBox);
                        }
                    }

                    // If this is an add or delete, or original properties weren't requested,
                    // there won't be original properties in the args, but zone properties need
                    // to be the fourth parameter, so we need to pad the args accordingly
                    int EXPECTED_ARGS = 3;
                    if (args.length() < EXPECTED_ARGS) {
                        args << QScriptValue();
                    }
                    assert(args.length() == EXPECTED_ARGS); // we MUST have 3 args by now!

                    args << zoneValues;
                }
            }

            QScriptValue result = filterData.filterFn.call(_nullObjectForFilter, args);

            if (filterData.uncaughtExceptions()) {
                return false;
            }

            if (result.isObject()) {
                // make propertiesIn reflect the changes, for the next filter...
                propertiesIn.copyFromScriptValue(result, false);

                // and update propertiesOut too. TODO: this could be more efficient...
                propertiesOut.copyFromScriptValue(result, false);

                // JavaScript objects are == only if they are the same object. To compare arbitrary values, we need to use JSON.
                auto out = QJsonValue::fromVariant(result.toVariant());
                wasChanged |= (in != out);
            } else if (result.isBool()) {
                // if the filter returned false, then it's authoritative
                if (!result.toBool()) {
                    return false;
                }

                // otherwise, assume it wants to pass all properties
                propertiesOut = propertiesIn;
                wasChanged = false;
            } else {
                return false;
            }
        }
    }

    // if we made it here, none of the filters rejected the message
    return true;
}
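A hypothetical caller sketch (names assumed) illustrating the contract: a false return rejects the message, a true return accepts it, and wasChanged reports whether a filter rewrote the properties.

// Sketch only, not the actual call site: route an edit through the zone filters and
// adopt the rewritten properties if any filter changed them.
bool applyEditSketch(EntityEditFilters& filters, EntityTree::FilterType type, EntityItemID entityID,
                     EntityItemPointer entity, glm::vec3 position, EntityItemProperties& properties) {
    EntityItemProperties filteredProperties;
    bool wasChanged = false;
    if (!filters.filter(position, properties, filteredProperties, wasChanged, type, entityID, entity)) {
        return false; // a zone filter (or the global filter) rejected the edit
    }
    if (wasChanged) {
        properties = filteredProperties; // adopt the filter's rewritten properties
    }
    return true;
}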