/** Renders the scene into _renderedImage with the given settings and camera.
    When pixel is (ALL, ALL) the whole image is traced, optionally across
    worker threads; otherwise only the single requested pixel is traced
    (useful for debugging one ray).  Returns the rendered image. */
Image3::Ref Raytracer::render(const RenderSettings& settings, const GCamera& camera, Vector2int16 pixel) {
    // Reset per-render progress state
    gStatus = 0;
    gNumPixelsRendered = 0;

    _renderedImage = Image3::createEmpty();
    _renderedImage->resize(settings._width, settings._height);
    _currentCamera = camera;
    _settings      = settings;

    // Rebuild the photon map (forward trace) before the backward pass, if enabled
    _photonMap->clear();
    if (settings._usePhotonMap && (_photonMap->size() == 0)) {
        photonMapForwardTrace();
    }

    const bool wholeImage = (pixel.x == ALL) && (pixel.y == ALL);
    if (! wholeImage) {
        // Debug mode: trace exactly one pixel
        backwardTrace(pixel.x, pixel.y);
    } else if (settings._multiThreaded) {
        GThread::runConcurrently2D(Vector2int32(0, 0),
                                   Vector2int32(settings._width, settings._height),
                                   this, &Raytracer::backwardTrace);
    } else {
        for (int row = 0; row < settings._height; ++row) {
            for (int col = 0; col < settings._width; ++col) {
                backwardTrace(col, row);
            }
        }
    }

    return _renderedImage;
}
/** Returns this vector clamped component-wise to the box [lo, hi]. */
Vector2int32 Vector2int32::clamp(const Vector2int32& lo, const Vector2int32& hi) {
    const int clampedX = iClamp(x, lo.x, hi.x);
    const int clampedY = iClamp(y, lo.y, hi.y);
    return Vector2int32(clampedX, clampedY);
}
void App::onGraphics3D(RenderDevice* rd, Array<shared_ptr<Surface> >& allSurfaces) {
    // This implementation is equivalent to the default GApp's. It is repeated here to make it
    // easy to modify rendering. If you don't require custom rendering, just delete this
    // method from your application and rely on the base class.
    if (! scene()) {
        return;
    }

    // Size the G-buffer to match the HDR framebuffer and fill it for this frame
    m_gbuffer->setSpecification(m_gbufferSpecification);
    m_gbuffer->resize(m_framebuffer->width(), m_framebuffer->height());
    m_gbuffer->prepare(rd, activeCamera(), 0, -(float)previousSimTimeStep(),
        m_settings.hdrFramebuffer.depthGuardBandThickness,
        m_settings.hdrFramebuffer.colorGuardBandThickness);

    // Main scene pass into the offscreen HDR framebuffer
    m_renderer->render(rd, m_framebuffer, m_depthPeelFramebuffer,
        scene()->lightingEnvironment(), m_gbuffer, allSurfaces);

    // Debug visualizations and post-process effects
    rd->pushState(m_framebuffer); {
        if (m_enableSVO) {
            // Replace the rasterized image with a full-screen SVO raycast pass
            rd->clear();
            //rd->setProjectionAndCameraMatrix(activeCamera()->projection(), activeCamera()->frame());
            rd->push2D();

            // Guard band: border pixels that are rendered but not displayed
            const Vector2int32 guardBand(m_settings.hdrFramebuffer.depthGuardBandThickness - m_settings.hdrFramebuffer.colorGuardBandThickness);
            const Vector2int32 colorRegionExtent = Vector2int32(m_framebuffer->vector2Bounds()) - guardBand * 2;

            Args args;
            rd->setGuardBandClip2D(Vector2int16(guardBand));
            args.setRect(rd->viewport());

            // Focal length comes from the [0][0] entry of the unit projection matrix
            Matrix4 proj;
            activeCamera()->getProjectUnitMatrix(m_framebuffer->rect2DBounds(), proj);
            float focalLength = proj[0][0];

            m_svo->setCurSvoId(0);

            args.setUniform("guardBand",   guardBand);
            args.setUniform("focalLength", focalLength);
            args.setUniform("renderRes",   Vector2(colorRegionExtent));
            args.setUniform("renderResI",  colorRegionExtent);
            args.setUniform("screenRatio", float(colorRegionExtent.y) / float(colorRegionExtent.x));

            m_svo->connectToShader(args, Access::READ, m_svo->maxDepth(), m_svo->maxDepth());

            rd->setColorWrite(true);
            rd->setDepthWrite(false);

            // Transform from camera space into SVO voxel space for the raycast shader
            const Matrix4& cameraToVoxelMatrix = Matrix4(m_svo->svoToWorldMatrix()).inverse() * activeCamera()->frame();
            args.setUniform("cameraToVoxelMatrix", cameraToVoxelMatrix);
            args.setUniform("voxelToWorldMatrix", m_svo->svoToWorldMatrix());
            args.setUniform("worldToVoxelMatrix", m_svo->worldToSVOMatrix());
            args.setUniform("wsCameraPos", activeCamera()->frame().translation);
            scene()->lightingEnvironment().setShaderArgs(args);
            args.setUniform("raycastingConeFactor", m_voxelConeAperture);

            rd->setDepthTest(RenderDevice::DEPTH_ALWAYS_PASS);
            // TODO: write gl_FragDepth and use a regular depth test here
            m_gbuffer->texture(GBuffer::Field::DEPTH_AND_STENCIL)->setShaderArgs(args, "depth_", Sampler::buffer());

            //rd->setBlendFunc(RenderDevice::BLEND_ONE, RenderDevice::BLEND_ONE_MINUS_SRC_ALPHA);
            LAUNCH_SHADER("raycast.pix", args);
            rd->pop2D();
        }

        // Call to make the App show the output of debugDraw(...)
        rd->setProjectionAndCameraMatrix(activeCamera()->projection(), activeCamera()->frame());
        drawDebugShapes();
        const shared_ptr<Entity>& selectedEntity =
            (notNull(developerWindow) && notNull(developerWindow->sceneEditorWindow)) ?
                developerWindow->sceneEditorWindow->selectedEntity() : shared_ptr<Entity>();
        scene()->visualize(rd, selectedEntity, allSurfaces, sceneVisualizationSettings(), activeCamera());

        // Negative polygon offset pulls the debug geometry slightly toward the
        // viewer while it is drawn; restored to 0 afterwards
        rd->setPolygonOffset(-0.2f);
        if (m_debugSVONodes) {
            m_svo->visualizeNodes(rd, m_debugSVONodeLevel);
        }
        if (m_debugSVOFragments) {
            m_svo->visualizeFragments(rd);
        }
        rd->setPolygonOffset(0.0f);

        // Post-process special effects
        m_depthOfField->apply(rd, m_framebuffer->texture(0),
            m_framebuffer->texture(Framebuffer::DEPTH), activeCamera(),
            m_settings.hdrFramebuffer.depthGuardBandThickness - m_settings.hdrFramebuffer.colorGuardBandThickness);

        m_motionBlur->apply(rd, m_framebuffer->texture(0),
            m_gbuffer->texture(GBuffer::Field::SS_EXPRESSIVE_MOTION),
            m_framebuffer->texture(Framebuffer::DEPTH), activeCamera(),
            m_settings.hdrFramebuffer.depthGuardBandThickness - m_settings.hdrFramebuffer.colorGuardBandThickness);
    } rd->popState();

    if ((submitToDisplayMode() == SubmitToDisplayMode::MAXIMIZE_THROUGHPUT) && (!renderDevice->swapBuffersAutomatically())) {
        // We're about to render to the actual back buffer, so swap the buffers now.
        // This call also allows the screenshot and video recording to capture the
        // previous frame just before it is displayed.
        swapBuffers();
    }

    // Clear the entire screen (needed even though we'll render over it, since
    // AFR uses clear() to detect that the buffer is not re-used.)
    rd->clear();

    // Perform gamma correction, bloom, and SSAA, and write to the native window frame buffer
    m_film->exposeAndRender(rd, activeCamera()->filmSettings(), m_framebuffer->texture(0));
}
/** Binds the ambient-occlusion result texture and its guard-band offset
    to the shader argument table under the given prefix. */
void AmbientOcclusion::setShaderArgs(UniformTable& args, const String& prefix, const Sampler& sampler) {
    // texture()->setShaderArgs also defines the <prefix>notNull macro for the shader
    texture()->setShaderArgs(args, prefix, sampler);
    // No offset between the AO buffer and the destination framebuffer
    const Vector2int32 zeroOffset(0, 0);
    args.setUniform(prefix + "offset", zeroOffset);
}