Example #1
0
void App::updateSonicSculpture(int audioSampleOffset, int audioSampleCount) {
	float delta = 0.1f;
	float radius = sqrt(m_smoothedRootMeanSquare) * 1.0f;
	CFrame frame = activeCamera()->frame();
	// Offset a bit forward
	frame.translation += activeCamera()->frame().lookVector() * 0.2f;
	// Offset 3 inches down to the mouth opening
	frame.translation += activeCamera()->frame().upVector() * -0.0762f;
	if (m_appMode == AppMode::DEFAULT) {
		if (notNull(m_currentSonicSculpturePiece)) {
			if (m_currentSonicSculpturePiece->size() > 0) {
                m_sonicSculpturePieces.append(m_currentSonicSculpturePiece);
                m_lastInterestingEventTime = System::time();
			}
			m_currentSonicSculpturePiece = shared_ptr<SonicSculpturePiece>();
		}
	} else if (m_appMode == AppMode::MAKING_SCULPTURE) {
		if (isNull(m_currentSonicSculpturePiece)) {
            shared_ptr<UniversalMaterial> material = getSonicSculptureMaterial(m_sonicSculpturePieces.size());
			m_currentSonicSculpturePiece = SonicSculpturePiece::create(material);
		}
        m_lastInterestingEventTime = System::time();
		// TODO: eliminate some of the redundant copies
		Array<float> samples;
		samples.resize(audioSampleCount);
		for (int i = 0; i < audioSampleCount; ++i) {
			samples[i] = m_cpuRawAudioSnapshot[i + audioSampleOffset];
		}
		m_currentSonicSculpturePiece->insert(frame, radius, delta, samples);
	}
}
Example #2
0
	// Per-frame HUD refresh: pushes the average FPS every frame, and the
	// camera position / triangle count only when they change.
	// Returns false (which halts Ogre rendering) when no view is attached.
	bool MapPresenter::frameStarted(const Ogre::FrameEvent& evt)
	{
		if (!view_)
			return false;

		RenderWindow* window = view_->renderWindow();

		// FPS readout is refreshed unconditionally.
		WorkspaceRoot::instance()->workspace()->displayFPS(window->getAverageFPS());

		// Camera position readout is updated only when the camera moved.
		const Ogre::Vector3& cameraPosition = activeCamera()->getPosition();
		if (cameraPosition != oldCamPos)
		{
			WorkspaceRoot::instance()->workspace()->displayCameraPos(cameraPosition.x, cameraPosition.y, cameraPosition.z);
			oldCamPos = cameraPosition;
		}

		// Triangle-count readout likewise updates only on change.
		const int triangleCount = window->getTriangleCount();
		if (triangleCount != oldTris)
		{
			WorkspaceRoot::instance()->workspace()->displayTriangleNum(triangleCount);
			oldTris = triangleCount;
		}
		return true;
	}
Example #3
0
// Ticks the base app, every sculpture piece (finished and in-progress),
// the synthesizer's pending-event queue, and the camera adjustment, then
// re-lays-out the debug window.
void App::onSimulation(RealTime rdt, SimTime sdt, SimTime idt) {
    GApp::onSimulation(rdt, sdt, idt);

    // Simulate every archived piece, plus the one currently being recorded.
    for (int pieceIndex = 0; pieceIndex < m_sonicSculpturePieces.size(); ++pieceIndex) {
        m_sonicSculpturePieces[pieceIndex]->onSimulation(rdt, sdt, idt);
    }
    if (notNull(m_currentSonicSculpturePiece)) {
        m_currentSonicSculpturePiece->onSimulation(rdt, sdt, idt);
    }

    // Drain synthesizer events queued since the previous frame.
    Synthesizer::global->flushHackQueue();

    // Scale the camera position by the global adjustment factor.
    CFrame adjustedFrame = activeCamera()->frame();
    adjustedFrame.translation *= cameraAdjustment;
    activeCamera()->setFrame(adjustedFrame);

    // Example GUI dynamic layout code: stretch the debugWindow across the
    // full window width.
    debugWindow->setRect(Rect2D::xywh(0, 0, (float)window()->width(), debugWindow->rect().height()));
}
Example #4
0
File: App.cpp Project: Mx7f/substep
// Handles per-frame user input: forwards left-mouse state and the eye ray
// under the cursor to the cellular automata.
//
// @param ui Input state for this frame. (GApp also exposes the same state
//           through the `userInput` member; this method now uses the `ui`
//           parameter consistently instead of mixing the two.)
void App::onUserInput(UserInput* ui) {
    GApp::onUserInput(ui);

    const bool pressed = ui->keyPressed(GKey::LEFT_MOUSE);
    const bool held    = ui->keyDown(GKey::LEFT_MOUSE);

    // Cast a ray through the center of the pixel under the mouse
    // (the +0.5 offset targets the pixel center).
    const Ray& mouseRay = scene()->eyeRay(activeCamera(), ui->mouseXY() + Vector2(0.5f, 0.5f), RenderDevice::current->viewport(), Vector2int16(0, 0));
    m_automata.handleMouse(pressed, held, mouseRay, ui->mouseXY());

    // Removed the dead `(void)ui;` suppression: `ui` is clearly used above.
    // Add key handling here based on the keys currently held or
    // ones that changed in the last frame.
}
Example #5
0
File: App.cpp Project: Mx7f/substep
void App::onGraphics3D(RenderDevice* rd, Array<shared_ptr<Surface> >& allSurfaces) {
    // Mirrors the default GApp rendering path; it is overridden here only to
    // make customization convenient. If no custom rendering is required,
    // this override can simply be deleted in favor of the base class.

    if (!scene()) {
        return;
    }

    // Draw the automata and debug visualizations into the HDR framebuffer.
    rd->pushState(m_framebuffer); {
        // Required so the App shows the output of debugDraw(...)
        rd->setProjectionAndCameraMatrix(activeCamera()->projection(), activeCamera()->frame());

        rd->setColorClearValue(Color3::black());
        rd->clear();

        // Ray through the center of the pixel under the mouse cursor.
        const Ray& cursorRay = scene()->eyeRay(activeCamera(), userInput->mouseXY() + Vector2(0.5f, 0.5f), rd->viewport(), Vector2int16(0, 0));
        m_automata.draw(rd, cursorRay, m_gridColor);

        if (m_showHelp) {
            renderGUI(rd);
        }
    } rd->popState();

    if ((submitToDisplayMode() == SubmitToDisplayMode::MAXIMIZE_THROUGHPUT) && !renderDevice->swapBuffersAutomatically()) {
        // About to render to the real back buffer, so swap now. This also
        // lets screenshot/video capture grab the previous frame just before
        // it is displayed.
        swapBuffers();
    }

    // Clear the whole screen (needed even though we'll render over it,
    // since AFR uses clear() to detect that the buffer is not re-used.)
    rd->clear();

    // Gamma correction, bloom, and SSAA into the native window framebuffer.
    m_film->exposeAndRender(rd, activeCamera()->filmSettings(), m_framebuffer->texture(0));
}
Example #6
0
void App::onGraphics3D(RenderDevice* rd, Array<shared_ptr<Surface> >& surface3D) {
    // Renders a four-quadrant layout into the main framebuffer: a fully
    // shaded view in the top-left quadrant and three fixed wireframe views
    // in the remaining quadrants, then tone-maps to the window.
    rd->pushState(m_framebuffer); {
        rd->clear();

        LightingEnvironment env = m_scene->lighting();
        env.ambientOcclusion = m_ambientOcclusion;

        // The shaded view occupies the top-left quarter of the target.
        const Rect2D& shadeViewport = Rect2D::xywh(0, 0, rd->width() / 2.0f, rd->height() / 2.0f);
        rd->setViewport(shadeViewport);

        Draw::skyBox(rd, env.environmentMapArray[0]);

        // Depth pre-pass, then AO computed from the shared depth buffer.
        Surface::renderDepthOnly(rd, surface3D, CullFace::BACK);
        m_ambientOcclusion->update(rd, env.ambientOcclusionSettings, activeCamera(), m_framebuffer->texture(Framebuffer::DEPTH));

        // Cull, sort back-to-front, and shade the visible surfaces.
        Array<shared_ptr<Surface> > sortedVisible;
        Surface::cull(activeCamera()->frame(), activeCamera()->projection(), rd->viewport(), surface3D, sortedVisible);
        Surface::sortBackToFront(sortedVisible, activeCamera()->frame().lookVector());
        rd->setProjectionAndCameraMatrix(activeCamera()->projection(), activeCamera()->frame());
        for (int s = 0; s < sortedVisible.size(); ++s) {
            sortedVisible[s]->render(rd, env, RenderPassType::OPAQUE_SAMPLES, "");
        }

        // Three fixed wireframe cameras: top-down, front, and side.
        shared_ptr<Camera> wireCamera[3] = { Camera::create(), Camera::create(), Camera::create() };
        wireCamera[0]->setFrame(CFrame::fromXYZYPRDegrees(0, 40, 0, 0, -90));
        wireCamera[1]->setFrame(CFrame::fromXYZYPRDegrees(0, 0, 40, 0, 0));
        wireCamera[2]->setFrame(CFrame::fromXYZYPRDegrees(40, 0, 0, 90, 0));

        // Lay the wireframe views out in the other three quadrants.
        Rect2D wireViewport[3];
        wireViewport[0] = shadeViewport + Vector2(rd->width() / 2.0f, 0.0f);
        wireViewport[1] = shadeViewport + Vector2(rd->width() / 2.0f, rd->height() / 2.0f);
        wireViewport[2] = shadeViewport + Vector2(0.0f, rd->height() / 2.0f);

        for (int v = 0; v < 3; ++v) {
            rd->setViewport(wireViewport[v]);
            rd->setProjectionAndCameraMatrix(wireCamera[v]->projection(), wireCamera[v]->frame());

            Surface::renderWireframe(rd, surface3D);
            Draw::axes(rd);

            // Required so the GApp shows the output of debugDraw calls
            drawDebugShapes();
        }
    } rd->popState();

    // Gamma correction, bloom, and SSAA into the native window framebuffer.
    m_film->exposeAndRender(rd, activeCamera()->filmSettings(), m_framebuffer->texture(0), 1);
}
Example #7
0
// Simulates the world and, in first-person mode, glues the active camera to
// the player entity's head (raised 0.6 m, pitched by the head tilt).
//
// @param rdt Wall-clock time since the previous frame.
// @param sdt Simulation time step.
// @param idt Intended (requested) simulation time step.
void App::onSimulation(RealTime rdt, SimTime sdt, SimTime idt) {
    GApp::onSimulation(rdt, sdt, idt);

    if (m_firstPersonMode) {
        shared_ptr<PlayerEntity> p = m_scene->typedEntity<PlayerEntity>(m_playerName);
        // Fix: typedEntity returns null when the entity is missing or has a
        // different type; the original dereferenced it unconditionally.
        if (notNull(p)) {
            CFrame c = p->frame();
            c.translation += Vector3(0, 0.6f, 0); // Get up to head height
            c.rotation = c.rotation * Matrix3::fromAxisAngle(Vector3::unitX(), p->headTilt());
            activeCamera()->setFrame(c);
        }
    }

    // Example GUI dynamic layout code.  Resize the debugWindow to fill
    // the screen horizontally.
    debugWindow->setRect(Rect2D::xywh(0, 0, (float)window()->width(), debugWindow->rect().height()));
}
Example #8
0
void App::onGraphics3D(RenderDevice* rd, Array<shared_ptr<Surface> >& surface3D) {
    // Deferred pass: refresh the G-buffer and let the renderer shade the
    // scene into the main framebuffer.
    m_gbuffer->setSpecification(m_gbufferSpecification);
    m_gbuffer->resize(m_framebuffer->width(), m_framebuffer->height());
    m_gbuffer->prepare(rd, activeCamera(), 0, -(float)previousSimTimeStep(), m_settings.depthGuardBandThickness, m_settings.colorGuardBandThickness);

    m_renderer->render(rd, m_framebuffer, m_depthPeelFramebuffer, scene()->lightingEnvironment(), m_gbuffer, surface3D);

    // Custom forward pass: draw the manipulated model with the phong shader.
    rd->pushState(m_framebuffer); {
        rd->setProjectionAndCameraMatrix(m_debugCamera->projection(), m_debugCamera->frame());

        // Pose the model at the manipulator's frame.
        Array<shared_ptr<Surface> > posedSurfaces;
        model->pose(posedSurfaces, manipulator->frame());

        // Shader arguments shared by every surface.
        Args args;
        configureShaderArgs(args);

        // Submit each surface's geometry to the graphics card.
        CFrame objectFrame;
        for (int s = 0; s < posedSurfaces.size(); ++s) {
            // Downcast to UniversalSurface to reach its gpuGeom fields.
            shared_ptr<UniversalSurface> surface = dynamic_pointer_cast<UniversalSurface>(posedSurfaces[s]);
            if (notNull(surface)) {
                surface->getCoordinateFrame(objectFrame);
                rd->setObjectToWorldMatrix(objectFrame);
                surface->gpuGeom()->setShaderArgs(args);

                // (Material properties and vertex attributes can also be
                // bound manually from the gpuGeom fields if desired.)
                LAUNCH_SHADER("phong.*", args);
            }
        }
    } rd->popState();

    swapBuffers();
    rd->clear();
    m_film->exposeAndRender(rd, m_debugCamera->filmSettings(), m_framebuffer->texture(0), 1);
}
Example #9
0
void App::onAfterLoadScene(const Any& any, const String& sceneName) {
	Array<shared_ptr<Surface>> allSurfaces;
	Array<shared_ptr<Surface2D>> ignore;
	onPose(allSurfaces, ignore);

	G3D::AABox aabox;
	Surface::getBoxBounds(allSurfaces, aabox);
	const float diameter = aabox.extent().max();
	const Point3 center = aabox.center();
	const float pad = 0.10f;
	const Vector3 extent = Vector3::one() * diameter * (1.0f + pad);
	aabox = AABox(center - extent / 2, center + extent / 2);

	Box octtreeBounds(aabox);

	m_svo->init(renderDevice, size_t(SVO_POOL_SIZE) * 1024 * 1024, SVO_MAX_DEPTH, size_t(SVO_FRAGBUFFER_SIZE) * 1024 * 1024);

	m_svo->prepare(renderDevice, activeCamera(), octtreeBounds, 0.0f, /*-(float)previousSimTimeStep()*/ -0.016667f);

	Surface::renderIntoSVO(renderDevice, allSurfaces, m_svo);

	m_svo->complete(renderDevice, "SVO_downsampleValues.glc");

}
Example #10
0
// Deferred-render the scene, optionally overlay a full-screen SVO ray march,
// then apply post-processing and tone-map to the window. The GL state
// transitions here are order-sensitive; do not reorder casually.
void App::onGraphics3D(RenderDevice* rd, Array<shared_ptr<Surface> >& allSurfaces) {
    // This implementation is equivalent to the default GApp's. It is repeated here to make it
    // easy to modify rendering. If you don't require custom rendering, just delete this
    // method from your application and rely on the base class.

    if (! scene()) {
        return;
    }
	
    // Refresh the G-buffer and let the deferred renderer shade the scene
    // into m_framebuffer.
    m_gbuffer->setSpecification(m_gbufferSpecification);
    m_gbuffer->resize(m_framebuffer->width(), m_framebuffer->height());
    m_gbuffer->prepare(rd, activeCamera(), 0, -(float)previousSimTimeStep(), m_settings.hdrFramebuffer.depthGuardBandThickness, m_settings.hdrFramebuffer.colorGuardBandThickness);

	
	m_renderer->render(rd, m_framebuffer, m_depthPeelFramebuffer, scene()->lightingEnvironment(), m_gbuffer, allSurfaces);
	

    // Debug visualizations and post-process effects
    rd->pushState(m_framebuffer); {

		// Full-screen SVO cone/ray march drawn as a 2D pass over the color region.
		if (m_enableSVO) {
			rd->clear();
			//rd->setProjectionAndCameraMatrix(activeCamera()->projection(), activeCamera()->frame());

			rd->push2D();
			// Guard band = extra border pixels that are rendered but not displayed.
			const Vector2int32 guardBand(m_settings.hdrFramebuffer.depthGuardBandThickness - m_settings.hdrFramebuffer.colorGuardBandThickness);
			const Vector2int32 colorRegionExtent = Vector2int32(m_framebuffer->vector2Bounds()) - guardBand * 2;

			Args args;
			rd->setGuardBandClip2D(Vector2int16(guardBand));
			args.setRect(rd->viewport());

			// proj[0][0] of the unit projection matrix is the focal length.
			Matrix4 proj;
			activeCamera()->getProjectUnitMatrix(m_framebuffer->rect2DBounds(), proj);
			float focalLength = proj[0][0];

			m_svo->setCurSvoId(0);
			args.setUniform("guardBand", guardBand);

			args.setUniform("focalLength", focalLength);
			args.setUniform("renderRes", Vector2(colorRegionExtent));
			args.setUniform("renderResI", colorRegionExtent);
			args.setUniform("screenRatio", float(colorRegionExtent.y) / float(colorRegionExtent.x));

			// Bind the SVO data for reading at its full depth range.
			m_svo->connectToShader(args, Access::READ, m_svo->maxDepth(), m_svo->maxDepth());

			rd->setColorWrite(true);
			rd->setDepthWrite(false);
			
			// Camera space -> SVO voxel space for the ray origin/direction setup.
			const Matrix4& cameraToVoxelMatrix = Matrix4(m_svo->svoToWorldMatrix()).inverse() * activeCamera()->frame();

			args.setUniform("cameraToVoxelMatrix", cameraToVoxelMatrix);
			args.setUniform("voxelToWorldMatrix", m_svo->svoToWorldMatrix());
			args.setUniform("worldToVoxelMatrix", m_svo->worldToSVOMatrix());
			args.setUniform("wsCameraPos", activeCamera()->frame().translation);
			scene()->lightingEnvironment().setShaderArgs(args);
			args.setUniform("raycastingConeFactor", m_voxelConeAperture);
			

			rd->setDepthTest(RenderDevice::DEPTH_ALWAYS_PASS); // TODO: write gl_FragDepth and use a regular depth test here
			m_gbuffer->texture(GBuffer::Field::DEPTH_AND_STENCIL)->setShaderArgs(args, "depth_", Sampler::buffer());
			//rd->setBlendFunc(RenderDevice::BLEND_ONE, RenderDevice::BLEND_ONE_MINUS_SRC_ALPHA);
			
			LAUNCH_SHADER("raycast.pix", args);
			rd->pop2D();
		}

		// Call to make the App show the output of debugDraw(...)
		rd->setProjectionAndCameraMatrix(activeCamera()->projection(), activeCamera()->frame());
		drawDebugShapes();
		const shared_ptr<Entity>& selectedEntity = (notNull(developerWindow) && notNull(developerWindow->sceneEditorWindow)) ? developerWindow->sceneEditorWindow->selectedEntity() : shared_ptr<Entity>();
        scene()->visualize(rd, selectedEntity, allSurfaces, sceneVisualizationSettings(), activeCamera());		

		// Slight negative polygon offset so the SVO debug geometry wins
		// z-fighting against the surfaces it overlays; restored afterwards.
		rd->setPolygonOffset(-0.2f);
		if (m_debugSVONodes) {
			m_svo->visualizeNodes(rd, m_debugSVONodeLevel);
		}
		if (m_debugSVOFragments) {
			m_svo->visualizeFragments(rd);
		}
		rd->setPolygonOffset(0.0f);

        // Post-process special effects
        m_depthOfField->apply(rd, m_framebuffer->texture(0), m_framebuffer->texture(Framebuffer::DEPTH), activeCamera(), m_settings.hdrFramebuffer.depthGuardBandThickness - m_settings.hdrFramebuffer.colorGuardBandThickness);
        
        m_motionBlur->apply(rd, m_framebuffer->texture(0), m_gbuffer->texture(GBuffer::Field::SS_EXPRESSIVE_MOTION), 
                            m_framebuffer->texture(Framebuffer::DEPTH), activeCamera(), 
                            m_settings.hdrFramebuffer.depthGuardBandThickness - m_settings.hdrFramebuffer.colorGuardBandThickness);



    } rd->popState();

    if ((submitToDisplayMode() == SubmitToDisplayMode::MAXIMIZE_THROUGHPUT) && (!renderDevice->swapBuffersAutomatically())) {
        // We're about to render to the actual back buffer, so swap the buffers now.
        // This call also allows the screenshot and video recording to capture the
        // previous frame just before it is displayed.
        swapBuffers();
    }

	// Clear the entire screen (needed even though we'll render over it, since
    // AFR uses clear() to detect that the buffer is not re-used.)
    rd->clear();

    // Perform gamma correction, bloom, and SSAA, and write to the native window frame buffer
    m_film->exposeAndRender(rd, activeCamera()->filmSettings(), m_framebuffer->texture(0));
}
Example #11
0
void Barbel::Scene::initScene()
{
    //Setup Framegraph
    if (m_frameGraph == Q_NULLPTR)
        m_frameGraph = new Qt3D::QFrameGraph();
    if (m_forwardRenderer == Q_NULLPTR)
        m_forwardRenderer = new Qt3D::QForwardRenderer();

    m_forwardRenderer->setClearColor(Qt::black);
    m_frameGraph->setActiveFrameGraph(m_forwardRenderer);
    m_rootEntity->addComponent(m_frameGraph);

    //Test Objects
    Barbel::Player *player = new Barbel::Player(m_rootEntity);
    player->setTranslation(QVector3D(0.0f, 0.0f, 0.0f));
    setActiveCamera(player->camera());
    activeCamera()->lens()->setAspectRatio(m_cameraAspectRatio);


    QVector3D startPostion(0.0f, 0.0f, 0.0f);
    QVector3D secondPosition(10.0f, 10.0f, 10.0f);
    QVector3D thirdPosition(-10.0f, 0.0f, 10.0f);

    QVector3D startAngles(0.0f, 0.0f, 0.0f);
    QVector3D secondAngles(45.0f, 150.0f, 0.0f);
    QVector3D thirdAngles(0.0f, 200.0f, 0.0f);
    QVector3D finalAngles(0.0f, 360.0f, 0.0f);


    QSequentialAnimationGroup *animationGroup = new QSequentialAnimationGroup(this);

    QParallelAnimationGroup *parallelAnimationGroup1 = new QParallelAnimationGroup(this);

    QPropertyAnimation *animation = new QPropertyAnimation(player, "translation", this);
    animation->setDuration(5000);
    animation->setStartValue(startPostion);
    animation->setEndValue(secondPosition);
    parallelAnimationGroup1->addAnimation(animation);

    QPropertyAnimation *rotationAnimation = new QPropertyAnimation(player, "rotation", this);
    rotationAnimation->setDuration(5000);
    rotationAnimation->setStartValue(startAngles);
    rotationAnimation->setEndValue(secondAngles);
    parallelAnimationGroup1->addAnimation(rotationAnimation);

    animationGroup->addAnimation(parallelAnimationGroup1);

    QParallelAnimationGroup *parallelAnimationGroup2 = new QParallelAnimationGroup(this);

    animation = new QPropertyAnimation(player, "translation", this);
    animation->setDuration(5000);
    animation->setStartValue(secondPosition);
    animation->setEndValue(thirdPosition);
    parallelAnimationGroup2->addAnimation(animation);

    rotationAnimation = new QPropertyAnimation(player, "rotation", this);
    rotationAnimation->setDuration(5000);
    rotationAnimation->setStartValue(secondAngles);
    rotationAnimation->setEndValue(thirdAngles);
    parallelAnimationGroup2->addAnimation(rotationAnimation);

    animationGroup->addAnimation(parallelAnimationGroup2);
    QParallelAnimationGroup *parallelAnimationGroup3 = new QParallelAnimationGroup(this);

    animation = new QPropertyAnimation(player, "translation", this);
    animation->setDuration(5000);
    animation->setStartValue(thirdPosition);
    animation->setEndValue(startPostion);
    parallelAnimationGroup3->addAnimation(animation);

    rotationAnimation = new QPropertyAnimation(player, "rotation", this);
    rotationAnimation->setDuration(5000);
    rotationAnimation->setStartValue(thirdAngles);
    rotationAnimation->setEndValue(finalAngles);
    parallelAnimationGroup3->addAnimation(rotationAnimation);

    animationGroup->addAnimation(parallelAnimationGroup3);
    animationGroup->setLoopCount(-1);
    animationGroup->start();

    //Test Cubes
    Qt3D::QPhongMaterial *phongMaterial1 = new Qt3D::QPhongMaterial();
    phongMaterial1->setDiffuse(QColor(94, 141, 25));
    phongMaterial1->setSpecular(Qt::white);
    Qt3D::QPhongMaterial *phongMaterial2 = new Qt3D::QPhongMaterial();
    phongMaterial2->setDiffuse(QColor(129, 23, 71));
    phongMaterial2->setSpecular(Qt::white);

    for (int z = -5; z < 5; z++) {
        for (int y = -5; y < 5; y++) {
            for (int x = -5; x < 5; x++) {
                float xSize = (rand() % 10000) / 10000.0;
                float ySize = (rand() % 10000) / 10000.0;
                float zSize = (rand() % 10000) / 10000.0;
                Barbel::TestCube *cube = new TestCube(QVector3D(x, y, z), QVector3D(xSize, ySize, zSize), m_rootEntity);
                if (y % 2)
                    cube->addComponent(phongMaterial1);
                else
                    cube->addComponent(phongMaterial2);
            }
        }
    }
}
Example #12
0
// Forward-renders the scene into m_framebuffer, tone-maps into
// m_finalFramebuffer, mirrors that to the local screen, and then — if a
// websocket client has requested a frame — streams it as a JPEG.
void App::onGraphics3D(RenderDevice* rd, Array<shared_ptr<Surface> >& allSurfaces) {
    rd->clear();

    // Bind the main framebuffer
    rd->pushState(m_framebuffer); {
        rd->setProjectionAndCameraMatrix(activeCamera()->projection(), activeCamera()->frame());

        m_gbuffer->resize(rd->width(), rd->height());
        m_gbuffer->prepare(rd, activeCamera(),  0, -(float)previousSimTimeStep(), m_settings.depthGuardBandThickness, m_settings.colorGuardBandThickness);
        rd->clear();
        
        // Cull and sort
        Array<shared_ptr<Surface> > sortedVisibleSurfaces;
        Surface::cull(activeCamera()->frame(), activeCamera()->projection(), rd->viewport(), allSurfaces, sortedVisibleSurfaces);
        Surface::sortBackToFront(sortedVisibleSurfaces, activeCamera()->frame().lookVector());
        
        // Depth pre-pass
        static const bool renderTransmissiveSurfaces = false;
        Surface::renderDepthOnly(rd, sortedVisibleSurfaces, CullFace::BACK, renderTransmissiveSurfaces);

        // Intentionally copy the lighting environment for mutation
        LightingEnvironment environment = scene()->lightingEnvironment();

        if (! m_settings.colorGuardBandThickness.isZero()) {
            rd->setGuardBandClip2D(m_settings.colorGuardBandThickness);
        }        
                
        // Render G-buffer if needed.  In this default implementation, it is needed if motion blur is enabled (for velocity) or
        // if face normals have been allocated and ambient occlusion is enabled.
        if (activeCamera()->motionBlurSettings().enabled() || 
            (environment.ambientOcclusionSettings.enabled && 
             notNull(m_gbuffer) && 
             notNull(m_gbuffer->texture(GBuffer::Field::CS_FACE_NORMAL)))) {

            rd->setDepthWrite(false); { 
                // We've already rendered the depth
                Surface::renderIntoGBuffer(rd, sortedVisibleSurfaces, m_gbuffer, activeCamera()->previousFrame(), activeCamera()->expressivePreviousFrame());
            } rd->setDepthWrite(true);
        }
        

        // Compute AO
        environment.ambientOcclusionSettings.useDepthPeelBuffer = false;
        m_ambientOcclusion->update(rd, environment.ambientOcclusionSettings, activeCamera(), m_framebuffer->texture(Framebuffer::DEPTH), shared_ptr<Texture>(), m_gbuffer->texture(GBuffer::Field::CS_FACE_NORMAL), m_settings.depthGuardBandThickness - m_settings.colorGuardBandThickness);

        // Compute shadow maps and forward-render visible surfaces
        environment.ambientOcclusion = m_ambientOcclusion;
        Surface::render(rd, activeCamera()->frame(), activeCamera()->projection(), sortedVisibleSurfaces, allSurfaces, environment);
                
        // Call to make the App show the output of debugDraw(...)
        drawDebugShapes();
        scene()->visualize(rd, shared_ptr<Entity>(), sceneVisualizationSettings());

        // Post-process special effects
        m_depthOfField->apply(rd, m_framebuffer->texture(0), m_framebuffer->texture(Framebuffer::DEPTH), activeCamera(), m_settings.depthGuardBandThickness - m_settings.colorGuardBandThickness);
        
        m_motionBlur->apply(rd, m_framebuffer->texture(0), m_gbuffer->texture(GBuffer::Field::SS_POSITION_CHANGE), m_framebuffer->texture(Framebuffer::DEPTH), activeCamera(), 
                            m_settings.depthGuardBandThickness - m_settings.colorGuardBandThickness);
    } rd->popState();

    // Perform gamma correction, bloom, and SSAA, and write to the native window frame buffer
    rd->push2D(m_finalFramebuffer); {
        rd->clear();
        m_film->exposeAndRender(rd, activeCamera()->filmSettings(), m_framebuffer->texture(0));
    } rd->pop2D();

    // Copy the final buffer to the server screen
    rd->push2D(); {
        Draw::rect2D(m_finalFramebuffer->texture(0)->rect2DBounds(), rd, Color3::white(), m_finalFramebuffer->texture(0));
    } rd->pop2D();

    // Lock guards the client set shared with the websocket server thread.
    clientSetMutex.lock();
    screenPrintf("Number of clients: %d\n", clientSet.size());
    //screenPrintf("clientWantsImage: %d\n", clientWantsImage.value());

    if ((clientWantsImage.value() != 0) && (clientSet.size() > 0)) {
        // Send the image to the first client
        mg_connection* conn = *clientSet.begin();

        // JPEG encoding/decoding takes more time but substantially less bandwidth than PNG
        // NOTE(review): the streamed image comes from m_finalBuffer while the
        // on-screen copy comes from m_finalFramebuffer — presumably these
        // alias the same target; confirm against their declarations.
        mg_websocket_write_image(conn, m_finalBuffer->toImage(ImageFormat::RGB8()), Image::JPEG);
        clientWantsImage = 0;
    }
    clientSetMutex.unlock();

}
Example #13
0
// Deferred-style forward pipeline: G-buffer pass (which doubles as the
// z-prepass), optional depth peel for AO, AO, shadow maps + forward shading,
// post effects, then tone-mapping to the window.
void App::onGraphics3D(RenderDevice* rd, Array<shared_ptr<Surface> >& allSurfaces) {
    if (! scene()) {
        return;
    }
    
    m_gbuffer->setSpecification(m_gbufferSpecification);
    m_gbuffer->resize(m_framebuffer->width(), m_framebuffer->height());

    // Share the depth buffer with the forward-rendering pipeline
    m_framebuffer->set(Framebuffer::DEPTH, m_gbuffer->texture(GBuffer::Field::DEPTH_AND_STENCIL));

    m_depthPeelFramebuffer->resize(m_framebuffer->width(), m_framebuffer->height());

    // Bind the main framebuffer
    rd->pushState(m_framebuffer); {
        rd->setProjectionAndCameraMatrix(activeCamera()->projection(), activeCamera()->frame());

        m_gbuffer->prepare(rd, activeCamera(),  0, -(float)previousSimTimeStep(), m_settings.depthGuardBandThickness, m_settings.colorGuardBandThickness);
        rd->clear();
        
        // Cull and sort
        Array<shared_ptr<Surface> > sortedVisibleSurfaces;
        Surface::cull(activeCamera()->frame(), activeCamera()->projection(), rd->viewport(), allSurfaces, sortedVisibleSurfaces);
        Surface::sortBackToFront(sortedVisibleSurfaces, activeCamera()->frame().lookVector());
        
        // Transmissive surfaces are excluded from the prepass/peel.
        const bool renderTransmissiveSurfaces = false;

        // Intentionally copy the lighting environment for mutation
        LightingEnvironment environment = scene()->lightingEnvironment();
        environment.ambientOcclusion = m_ambientOcclusion;
       
        // Render z-prepass and G-buffer.
        Surface::renderIntoGBuffer(rd, sortedVisibleSurfaces, m_gbuffer, activeCamera()->previousFrame(), activeCamera()->expressivePreviousFrame(), renderTransmissiveSurfaces);

        // This could be the OR of several flags; the starter begins with only one motivating algorithm for depth peel
        const bool needDepthPeel = environment.ambientOcclusionSettings.useDepthPeelBuffer;
        if (needDepthPeel) {
            // Second depth layer, separated by the AO peel hint, for
            // depth-peeled ambient occlusion.
            rd->pushState(m_depthPeelFramebuffer); {
                rd->clear();
                rd->setProjectionAndCameraMatrix(activeCamera()->projection(), activeCamera()->frame());
                Surface::renderDepthOnly(rd, sortedVisibleSurfaces, CullFace::BACK, renderTransmissiveSurfaces, m_framebuffer->texture(Framebuffer::DEPTH), environment.ambientOcclusionSettings.depthPeelSeparationHint);
            } rd->popState();
        }
        

        if (! m_settings.colorGuardBandThickness.isZero()) {
            rd->setGuardBandClip2D(m_settings.colorGuardBandThickness);
        }        

        // Compute AO
        m_ambientOcclusion->update(rd, environment.ambientOcclusionSettings, activeCamera(), m_framebuffer->texture(Framebuffer::DEPTH), m_depthPeelFramebuffer->texture(Framebuffer::DEPTH), m_gbuffer->texture(GBuffer::Field::CS_FACE_NORMAL), m_gbuffer->specification().encoding[GBuffer::Field::CS_FACE_NORMAL], m_settings.depthGuardBandThickness - m_settings.colorGuardBandThickness);


        // Only re-render shadow maps when something that affects lighting
        // has changed since the last frame.
        RealTime lightingChangeTime = max(scene()->lastEditingTime(), max(scene()->lastLightChangeTime(), scene()->lastVisibleChangeTime()));
        bool updateShadowMaps = false;
        if (lightingChangeTime > m_lastLightingChangeTime) {
            m_lastLightingChangeTime = lightingChangeTime;
            updateShadowMaps = true;
        }
        // No need to write depth, since it was covered by the gbuffer pass
        //rd->setDepthWrite(false);
        // Compute shadow maps and forward-render visible surfaces
        Surface::render(rd, activeCamera()->frame(), activeCamera()->projection(), sortedVisibleSurfaces, allSurfaces, environment, Surface::ALPHA_BINARY, updateShadowMaps, m_settings.depthGuardBandThickness - m_settings.colorGuardBandThickness);
        
        if (m_showWireframe) {
            Surface::renderWireframe(rd, sortedVisibleSurfaces);
        }
                
        // Call to make the App show the output of debugDraw(...)
        drawDebugShapes();
        scene()->visualize(rd, sceneVisualizationSettings());

        // Post-process special effects
        m_depthOfField->apply(rd, m_framebuffer->texture(0), m_framebuffer->texture(Framebuffer::DEPTH), activeCamera(), m_settings.depthGuardBandThickness - m_settings.colorGuardBandThickness);
        
        m_motionBlur->apply(rd, m_framebuffer->texture(0), m_gbuffer->texture(GBuffer::Field::SS_EXPRESSIVE_MOTION), 
                            m_gbuffer->specification().encoding[GBuffer::Field::SS_EXPRESSIVE_MOTION], m_framebuffer->texture(Framebuffer::DEPTH), activeCamera(), 
                            m_settings.depthGuardBandThickness - m_settings.colorGuardBandThickness);

    } rd->popState();

    // We're about to render to the actual back buffer, so swap the buffers now.
    // This call also allows the screenshot and video recording to capture the
    // previous frame just before it is displayed.
    swapBuffers();

	// Clear the entire screen (needed even though we'll render over it, since
    // AFR uses clear() to detect that the buffer is not re-used.)
    rd->clear();

    // Perform gamma correction, bloom, and SSAA, and write to the native window frame buffer
    m_film->exposeAndRender(rd, activeCamera()->filmSettings(), m_framebuffer->texture(0));
}
Example #14
0
void App::onGraphics3D(RenderDevice* rd, Array<shared_ptr<Surface> >& allSurfaces) {
    // Forward pipeline: early-Z, optional velocity buffer, AO, shaded pass,
    // then depth of field and motion blur — all inside the HDR framebuffer.
    rd->pushState(m_frameBuffer); {

        rd->setProjectionAndCameraMatrix(activeCamera()->projection(), activeCamera()->frame());

        m_gbuffer->resize(rd->width(), rd->height());
        m_gbuffer->prepare(rd, activeCamera(), 0, -(float)previousSimTimeStep(), m_settings.depthGuardBandThickness, m_settings.colorGuardBandThickness);
        if (!m_settings.colorGuardBandThickness.isZero()) {
            rd->setGuardBandClip2D(m_settings.colorGuardBandThickness);
        }

        // Cull against the frustum, then order back-to-front for shading.
        Array<shared_ptr<Surface> > visibleSorted;
        Surface::cull(activeCamera()->frame(), activeCamera()->projection(), rd->viewport(), allSurfaces, visibleSorted);
        Surface::sortBackToFront(visibleSorted, activeCamera()->frame().lookVector());

        // Early-Z pass; transmissive surfaces stay out of the prepass.
        static const bool renderTransmissiveSurfaces = false;
        Surface::renderDepthOnly(rd, visibleSorted, CullFace::BACK, renderTransmissiveSurfaces);
        rd->setDepthWrite(false);

        // The velocity buffer is only needed when motion blur is active.
        if (activeCamera()->motionBlurSettings().enabled()) {
            Surface::renderIntoGBuffer(rd, visibleSorted, m_gbuffer, activeCamera()->previousFrame());
        }

        LocalLightingEnvironment environment = m_scene->localLightingEnvironment();
        environment.ambientOcclusion = m_ambientOcclusion;

        // Ambient occlusion from the pre-pass depth.
        m_ambientOcclusion->update(rd, environment.ambientOcclusionSettings, activeCamera(), m_depthBuffer);

        // Shadow maps plus forward shading of the visible surfaces.
        Surface::render(rd, activeCamera()->frame(), activeCamera()->projection(), visibleSorted, allSurfaces, environment);
        rd->setDepthWrite(true);

        // Required so the App shows the output of debugDraw(...)
        drawDebugShapes();

        // Post-process special effects.
        m_depthOfField->apply(rd, m_colorBuffer0, m_depthBuffer, activeCamera(), m_settings.depthGuardBandThickness - m_settings.colorGuardBandThickness);

        m_motionBlur->apply(rd, m_colorBuffer0, m_gbuffer->texture(GBuffer::Field::SS_POSITION_CHANGE),
                            m_gbuffer->specification().encoding[GBuffer::Field::SS_POSITION_CHANGE], m_depthBuffer, activeCamera(),
                            m_settings.depthGuardBandThickness - m_settings.colorGuardBandThickness);

    } rd->popState();

    // Gamma correction, bloom, and SSAA into the native window framebuffer.
    m_film->exposeAndRender(rd, activeCamera()->filmSettings(), m_colorBuffer0, 1);

    screenPrintf("WASD to move");
    screenPrintf("Mouse to turn");
    screenPrintf("Space to jump");
}
Example #15
0
// Renders one frame into m_frameBuffer using a G-buffer pass, ambient
// occlusion, and a forward shading pass, then applies depth of field and
// motion blur, and finally tonemaps (film) into the native window buffer.
//
// rd          - active render device; all passes draw through it.
// allSurfaces - every surface in the scene; culled/sorted locally below.
void App::onGraphics3D(RenderDevice* rd, Array<shared_ptr<Surface> >& allSurfaces) {
    if (! scene()) {
        // Nothing to render without a loaded scene.
        return;
    }

    // Bind the main frameBuffer
    rd->pushState(m_frameBuffer); {
        rd->setProjectionAndCameraMatrix(activeCamera()->projection(), activeCamera()->frame());

        // Keep the G-buffer sized to the current render target.
        m_gbuffer->resize(rd->width(), rd->height());
        // Guard bands pad the viewport so screen-space effects have valid
        // neighborhoods at the edges. NOTE(review): the 0 and
        // -previousSimTimeStep() arguments look like current/previous time
        // offsets for velocity computation — confirm against GBuffer::prepare.
        m_gbuffer->prepare(rd, activeCamera(),  0, -(float)previousSimTimeStep(), m_settings.depthGuardBandThickness, m_settings.colorGuardBandThickness);
        rd->clear();
        
        // Cull and sort
        Array<shared_ptr<Surface> > sortedVisibleSurfaces;
        Surface::cull(activeCamera()->frame(), activeCamera()->projection(), rd->viewport(), allSurfaces, sortedVisibleSurfaces);
        Surface::sortBackToFront(sortedVisibleSurfaces, activeCamera()->frame().lookVector());
        
        // Transmissive surfaces are excluded from the G-buffer pass below.
        const bool renderTransmissiveSurfaces = false;

        // Intentionally copy the lighting environment for mutation
        LocalLightingEnvironment environment = scene()->localLightingEnvironment();
        environment.ambientOcclusion = m_ambientOcclusion;
       
        // Render z-prepass and G-buffer.  In this default implementation, it is needed if motion blur is enabled (for velocity) or
        // if face normals have been allocated and ambient occlusion is enabled.
        Surface::renderIntoGBuffer(rd, sortedVisibleSurfaces, m_gbuffer, activeCamera()->previousFrame(), renderTransmissiveSurfaces);

        if (! m_settings.colorGuardBandThickness.isZero()) {
            // Clip 2D operations to the region inside the color guard band.
            rd->setGuardBandClip2D(m_settings.colorGuardBandThickness);
        }        

        // Compute AO from the depth buffer and the G-buffer's camera-space
        // face normals; the result is consumed through `environment` below.
        m_ambientOcclusion->update(rd, environment.ambientOcclusionSettings, activeCamera(), m_frameBuffer->texture(Framebuffer::DEPTH), shared_ptr<Texture>(), m_gbuffer->texture(GBuffer::Field::CS_FACE_NORMAL), m_gbuffer->specification().encoding[GBuffer::Field::CS_FACE_NORMAL], m_settings.depthGuardBandThickness - m_settings.colorGuardBandThickness);

        // No need to write depth, since it was covered by the gbuffer pass
        //rd->setDepthWrite(false);
        // Compute shadow maps and forward-render visible surfaces
        Surface::render(rd, activeCamera()->frame(), activeCamera()->projection(), sortedVisibleSurfaces, allSurfaces, environment, Surface::ALPHA_BINARY, true, m_settings.depthGuardBandThickness - m_settings.colorGuardBandThickness);
        
        if (m_showWireframe) {
            // Debug overlay: outline the visible geometry.
            Surface::renderWireframe(rd, sortedVisibleSurfaces);
        }
                
        // Call to make the App show the output of debugDraw(...)
        drawDebugShapes();
        scene()->visualize(rd, sceneVisualizationSettings());

        // Post-process special effects
        m_depthOfField->apply(rd, m_colorBuffer0, m_depthBuffer, activeCamera(), m_settings.depthGuardBandThickness - m_settings.colorGuardBandThickness);
        
        // Motion blur uses the screen-space position-change (velocity) field
        // written by the G-buffer pass above.
        m_motionBlur->apply(rd, m_colorBuffer0, m_gbuffer->texture(GBuffer::Field::SS_POSITION_CHANGE), 
                            m_gbuffer->specification().encoding[GBuffer::Field::SS_POSITION_CHANGE], m_depthBuffer, activeCamera(), 
                            m_settings.depthGuardBandThickness - m_settings.colorGuardBandThickness);

    } rd->popState();

    // We're about to render to the actual back buffer, so swap the buffers now.
    // This call also allows the screenshot and video recording to capture the
    // previous frame just before it is displayed.
    swapBuffers();

    // Clear the entire screen (needed even though we'll render over it, since
    // AFR uses clear() to detect that the buffer is not re-used.)
    rd->clear();

    // Perform gamma correction, bloom, and SSAA, and write to the native window frame buffer
    m_film->exposeAndRender(rd, activeCamera()->filmSettings(), m_colorBuffer0);
}
Example #16
0
// Per-frame keyboard handling: sculpting mode, save/load, playback triggers,
// freeze toggle, and camera fine-adjustment.
void App::onUserInput(UserInput* ui) {
    GApp::onUserInput(ui);

    // Sculpting is active only while the space bar is held down.
    m_appMode = ui->keyDown(GKey::SPACE) ? AppMode::MAKING_SCULPTURE : AppMode::DEFAULT;

    // Ctrl+S / Ctrl+L: persist or restore the soundscape.
    const bool ctrlHeld = ui->keyDown(GKey::LCTRL) || ui->keyDown(GKey::RCTRL);
    if (ctrlHeld) {
        if (ui->keyPressed(GKey('s'))) {
            saveSoundscape();
        }
        if (ui->keyPressed(GKey('l'))) {
            loadSoundscape();
        }
    }

    if (ui->keyPressed(GKey::RETURN)) {
        // Probe leftward from the world origin for the nearest sculpture
        // point, then play back starting there, sweeping to the right.
        const Point3  rayOrigin(0.0, 0.0, 0.0);
        const Vector3 rightDir = activeCamera()->frame().rightVector();
        Ray probeRay(rayOrigin, -rightDir);

        float nearestT = finf();
        for (auto& piece : m_sonicSculpturePieces) {
            nearestT = min(nearestT, piece->minValueAlongRay(probeRay));
        }

        probeRay.set(rayOrigin + rightDir * nearestT, rightDir);
        playSculpture(probeRay);
    }

    if (ui->keyPressed(GKey('f'))) {
        // Toggle the global freeze and propagate it to every piece.
        m_freezeEverything = !m_freezeEverything;
        for (shared_ptr<SonicSculpturePiece>& piece : m_sonicSculpturePieces) {
            piece->setFrozen(m_freezeEverything);
        }
    }

    // ',' / '.' nudge the camera adjustment while held.
    if (ui->keyDown(GKey(','))) {
        cameraAdjustment -= 0.01f;
    }
    if (ui->keyDown(GKey('.'))) {
        cameraAdjustment += 0.01f;
    }

    // Hack for playing pieces: number keys 1-8 trigger the matching piece.
    for (int slot = 0; slot < 8; ++slot) {
        if ((m_sonicSculpturePieces.size() > slot) && ui->keyPressed(GKey('1' + char(slot)))) {
            generatePlayPulse(m_sonicSculpturePieces[slot]);
            m_lastInterestingEventTime = System::time();
        }
    }

    // Add key handling here based on the keys currently held or
    // ones that changed in the last frame.
}
Example #17
0
// Custom 3D rendering: the default deferred pipeline plus sonic-sculpture
// geometry, play-plane visualization, and a 2D overlay of active sound
// instances and their playheads.
//
// rd          - active render device.
// allSurfaces - every surface posed by the scene this frame.
void App::onGraphics3D(RenderDevice* rd, Array<shared_ptr<Surface> >& allSurfaces) {
    // This implementation is equivalent to the default GApp's. It is repeated here to make it
    // easy to modify rendering. If you don't require custom rendering, just delete this
    // method from your application and rely on the base class.

    if (!scene()) {
        // No scene: still honor the swap protocol, then show only debug shapes.
        if ((submitToDisplayMode() == SubmitToDisplayMode::MAXIMIZE_THROUGHPUT) && (!rd->swapBuffersAutomatically())) {
            swapBuffers();
        }
        rd->clear();
        rd->pushState(); {
            rd->setProjectionAndCameraMatrix(activeCamera()->projection(), activeCamera()->frame());
            drawDebugShapes();
        } rd->popState();
        return;
    }

    updateAudioData();

    // If nothing interesting has happened for 10s, occasionally (~0.1% chance
    // per frame) replay a random existing sculpture to keep the scene alive.
    if (System::time() - m_lastInterestingEventTime > 10.0) {
        if (Random::common().uniform() < 0.001f) {
            if (m_sonicSculpturePieces.size() > 0) {
                int index = Random::common().integer(0, m_sonicSculpturePieces.size() - 1);
                generatePlayPulse(m_sonicSculpturePieces[index]);
                m_lastInterestingEventTime = System::time();
            }
        }
    }
    handlePlayPulses();

    GBuffer::Specification gbufferSpec = m_gbufferSpecification;
    extendGBufferSpecification(gbufferSpec);
    m_gbuffer->setSpecification(gbufferSpec);
    m_gbuffer->resize(m_framebuffer->width(), m_framebuffer->height());
    m_gbuffer->prepare(rd, activeCamera(), 0, -(float)previousSimTimeStep(), m_settings.depthGuardBandThickness, m_settings.colorGuardBandThickness);

    m_renderer->render(rd, m_framebuffer, m_depthPeelFramebuffer, scene()->lightingEnvironment(), m_gbuffer, allSurfaces);

    // Debug visualizations and post-process effects
    rd->pushState(m_framebuffer); {
        // Call to make the App show the output of debugDraw(...)
        rd->setProjectionAndCameraMatrix(activeCamera()->projection(), activeCamera()->frame());

        for (auto& piece : m_sonicSculpturePieces) {
            piece->render(rd, scene()->lightingEnvironment());
        }
        if (notNull(m_currentSonicSculpturePiece)) {
            m_currentSonicSculpturePiece->render(rd, scene()->lightingEnvironment());
        }

        // Draw a translucent box where each active play plane currently is.
        // Iterate backwards so expired planes can be removed in place.
        for (int i = m_playPlanes.size() - 1; i >= 0; --i) {
            const PlayPlane& pp = m_playPlanes[i];
            if (pp.endWindowIndex < g_sampleWindowIndex) {
                // BUGFIX: Array::remove() shifts later elements down, which
                // invalidates pp; the old code continued to read pp afterwards
                // (stale data, or past the logical end when i was last).
                // An expired plane should not be drawn at all.
                m_playPlanes.remove(i);
                continue;
            }

            // Advance the plane along its travel direction by how many sample
            // windows have elapsed since playback began.
            Point3 point = pp.origin + (pp.direction * METERS_PER_SAMPLE_WINDOW * (g_sampleWindowIndex - pp.beginWindowIndex));

            Color4 solidColor(1.0f, .02f, .03f, .15f);

            CFrame planeFrame = pp.frame;
            planeFrame.translation = point;

            // Size the box to bound every sculpture piece in plane space,
            // padded by 3 units on each side and 0.1 thick.
            Vector3 minP(finf(), finf(), finf());
            Vector3 maxP(-finf(), -finf(), -finf());
            for (auto& piece : m_sonicSculpturePieces) {
                piece->minMaxValue(planeFrame, minP, maxP);
            }
            Box b(Vector3(minP.xy() - Vector2(3, 3), 0.0f), Vector3(maxP.xy() + Vector2(3, 3), 0.1f), planeFrame);

            Draw::box(b, rd, solidColor, Color4::clear());
        }

        drawDebugShapes();
        const shared_ptr<Entity>& selectedEntity = (notNull(developerWindow) && notNull(developerWindow->sceneEditorWindow)) ? developerWindow->sceneEditorWindow->selectedEntity() : shared_ptr<Entity>();
        scene()->visualize(rd, selectedEntity, allSurfaces, sceneVisualizationSettings(), activeCamera());

        // Post-process special effects
        m_depthOfField->apply(rd, m_framebuffer->texture(0), m_framebuffer->texture(Framebuffer::DEPTH), activeCamera(), m_settings.depthGuardBandThickness - m_settings.colorGuardBandThickness);

        m_motionBlur->apply(rd, m_framebuffer->texture(0), m_gbuffer->texture(GBuffer::Field::SS_EXPRESSIVE_MOTION),
            m_framebuffer->texture(Framebuffer::DEPTH), activeCamera(),
            m_settings.depthGuardBandThickness - m_settings.colorGuardBandThickness);
    } rd->popState();

    // 2D overlay: one thumbnail per live sound instance with a playhead line.
    rd->push2D(m_framebuffer); {
        rd->setBlendFunc(RenderDevice::BLEND_SRC_ALPHA, RenderDevice::BLEND_ONE_MINUS_SRC_ALPHA);
        Array<SoundInstance> soundInstances;
        Synthesizer::global->getSoundInstances(soundInstances);
        int xOffset = 10;
        Vector2 dim(256, 100);
        for (int i = 0; i < soundInstances.size(); ++i) {
            // Stack thumbnails upward from the bottom of the window.
            int yOffset = rd->height() - 120 - (120 * i);
            Draw::rect2D(Rect2D::xywh(Point2(xOffset, yOffset), dim), rd, Color3::white(), soundInstances[i].displayTexture());
            // Fractional progress through the sample buffer -> playhead x position.
            float playheadAlpha = ((float)soundInstances[i].currentPosition) / soundInstances[i].audioSample->buffer.size();
            float playheadX = xOffset + (playheadAlpha * dim.x);
            Draw::rect2D(Rect2D::xywh(Point2(playheadX, yOffset), Vector2(1, dim.y)), rd, Color3::yellow());
        }

        // Fade out the instructional text between 9s and 10s after startup.
        static shared_ptr<GFont> font = GFont::fromFile(System::findDataFile("arial.fnt"));
        float time = System::time() - m_initialTime;
        if (time < 10.0f) {
            float fontAlpha = time < 9.0f ? 1.0f : 10.0f - time;
            font->draw2D(rd, "Press Space to Sculpt", Vector2(rd->width()/2, rd->height()-100.0f), 30.0f, Color4(Color3::black(), fontAlpha), Color4(Color3::white()*0.6f, fontAlpha), GFont::XALIGN_CENTER);
        }
    } rd->pop2D();

    // CONSISTENCY: use the rd parameter (as the early-out above does) rather
    // than the renderDevice member.
    if ((submitToDisplayMode() == SubmitToDisplayMode::MAXIMIZE_THROUGHPUT) && (!rd->swapBuffersAutomatically())) {
        // We're about to render to the actual back buffer, so swap the buffers now.
        // This call also allows the screenshot and video recording to capture the
        // previous frame just before it is displayed.
        swapBuffers();
    }

    // Clear the entire screen (needed even though we'll render over it, since
    // AFR uses clear() to detect that the buffer is not re-used.)
    rd->clear();

    // Perform gamma correction, bloom, and SSAA, and write to the native window frame buffer
    m_film->exposeAndRender(rd, activeCamera()->filmSettings(), m_framebuffer->texture(0));
}