void DrawBackgroundDeferred::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext, const Inputs& inputs) {
    assert(renderContext->args);
    assert(renderContext->args->hasViewFrustum());

    const auto& inItems = inputs.get0();
    const auto& lightingModel = inputs.get1();
    if (!lightingModel->isBackgroundEnabled()) {
        return;
    }

    RenderArgs* args = renderContext->args;
    doInBatch(args->_context, [&](gpu::Batch& batch) {
        args->_batch = &batch;
    //    _gpuTimer.begin(batch);

        batch.enableSkybox(true);
        
        batch.setViewportTransform(args->_viewport);
        batch.setStateScissorRect(args->_viewport);

        glm::mat4 projMat;
        Transform viewMat;
        args->getViewFrustum().evalProjectionMatrix(projMat);
        args->getViewFrustum().evalViewTransform(viewMat);

        batch.setProjectionTransform(projMat);
        batch.setViewTransform(viewMat);

        renderItems(sceneContext, renderContext, inItems);
     //   _gpuTimer.end(batch);
    });
    args->_batch = nullptr;

   // std::static_pointer_cast<Config>(renderContext->jobConfig)->gpuTime = _gpuTimer.getAverage();
}
void DrawDeferred::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext, const Inputs& inputs) {
    assert(renderContext->args);
    assert(renderContext->args->hasViewFrustum());

    auto config = std::static_pointer_cast<Config>(renderContext->jobConfig);

    const auto& inItems = inputs.get0();
    const auto& lightingModel = inputs.get1();

    RenderArgs* args = renderContext->args;

    gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
        args->_batch = &batch;
        
        // Setup camera, projection and viewport for all items
        batch.setViewportTransform(args->_viewport);
        batch.setStateScissorRect(args->_viewport);

        glm::mat4 projMat;
        Transform viewMat;
        args->getViewFrustum().evalProjectionMatrix(projMat);
        args->getViewFrustum().evalViewTransform(viewMat);

        batch.setProjectionTransform(projMat);
        batch.setViewTransform(viewMat);

        // Setup lighting model for all items;
        batch.setUniformBuffer(render::ShapePipeline::Slot::LIGHTING_MODEL, lightingModel->getParametersBuffer());

        renderShapes(sceneContext, renderContext, _shapePlumber, inItems, _maxDrawn);
        args->_batch = nullptr;
    });

    config->setNumDrawn((int)inItems.size());
}
void ResolveFramebuffer::run(const render::RenderContextPointer& renderContext, const Inputs& inputs, Outputs& outputs) {
    RenderArgs* args = renderContext->args;
    auto srcFbo = inputs.get0();
    auto destFbo = inputs.get1();

    if (!destFbo) {
        destFbo = args->_blitFramebuffer;
    }
    outputs = destFbo;

    // Check valid src and dest
    if (!srcFbo || !destFbo) {
        return;
    }
    
    // Check valid size for src and dest
    auto frameSize(srcFbo->getSize());
    if (destFbo->getSize() != frameSize) {
        return;
    }

    gpu::Vec4i rectSrc;
    rectSrc.z = frameSize.x;
    rectSrc.w = frameSize.y;
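    // Blit src into dest, reusing the same frame-sized rect for source and destination.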
    gpu::doInBatch("Resolve", args->_context, [&](gpu::Batch& batch) { 
        batch.blit(srcFbo, rectSrc, destFbo, rectSrc);
    });
}
Example #4
void PrepareDeferred::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext, const Inputs& inputs, Outputs& outputs) {
    auto args = renderContext->args;

    auto primaryFramebuffer = inputs.get0();
    auto lightingModel = inputs.get1();

    if (!_deferredFramebuffer) {
        _deferredFramebuffer = std::make_shared<DeferredFramebuffer>();
    }
    _deferredFramebuffer->updatePrimaryDepth(primaryFramebuffer->getDepthStencilBuffer());

    outputs.edit0() = _deferredFramebuffer;
    outputs.edit1() = _deferredFramebuffer->getLightingFramebuffer();


    gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
        batch.enableStereo(false);
        batch.setViewportTransform(args->_viewport);
        batch.setStateScissorRect(args->_viewport);

        // Clear deferred
        auto deferredFbo = _deferredFramebuffer->getDeferredFramebuffer();
        batch.setFramebuffer(deferredFbo);

        // Clear Color, Depth and Stencil for deferred buffer
        batch.clearFramebuffer(
            gpu::Framebuffer::BUFFER_COLOR0 | gpu::Framebuffer::BUFFER_COLOR1 | gpu::Framebuffer::BUFFER_COLOR2 | gpu::Framebuffer::BUFFER_COLOR3 |
            gpu::Framebuffer::BUFFER_DEPTH |
            gpu::Framebuffer::BUFFER_STENCIL,
            vec4(vec3(0), 0), 1.0, 0.0, true);

        // For the rest of the rendering, bind the lighting model
        batch.setUniformBuffer(LIGHTING_MODEL_BUFFER_SLOT, lightingModel->getParametersBuffer());
    });
}
Example #5
void LightClusteringPass::run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const Inputs& inputs, Outputs& output) {
    auto args = renderContext->args;
    
    auto deferredTransform = inputs.get0();
    auto lightingModel = inputs.get1();
    auto surfaceGeometryFramebuffer = inputs.get2();
    
    
    if (!_lightClusters) {
        _lightClusters = std::make_shared<LightClusters>();
    }
    
    // first update the Grid with the new frustum
    if (!_freeze) {
        _lightClusters->updateFrustum(args->getViewFrustum());
    }
    
    // From the LightStage and the current frame, update the light cluster Grid
    auto deferredLightingEffect = DependencyManager::get<DeferredLightingEffect>();
    auto lightStage = deferredLightingEffect->getLightStage();
    _lightClusters->updateLightStage(lightStage);
    _lightClusters->updateLightFrame(lightStage->_currentFrame, lightingModel->isPointLightEnabled(), lightingModel->isSpotLightEnabled());
    
    auto clusteringStats = _lightClusters->updateClusters();

    output = _lightClusters;

    auto config = std::static_pointer_cast<Config>(renderContext->jobConfig);
    config->numSceneLights = lightStage->getNumLights();
    config->numFreeSceneLights = lightStage->getNumFreeLights();
    config->numAllocatedSceneLights = lightStage->getNumAllocatedLights();
    config->setNumInputLights(clusteringStats.x);
    config->setNumClusteredLights(clusteringStats.y);
    config->setNumClusteredLightReferences(clusteringStats.z);
}
void AmbientOcclusionEffect::run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const Inputs& inputs, Outputs& outputs) {
    assert(renderContext->args);
    assert(renderContext->args->hasViewFrustum());

    RenderArgs* args = renderContext->args;

    const auto& frameTransform = inputs.get0();
    const auto& linearDepthFramebuffer = inputs.get2();
    
    auto linearDepthTexture = linearDepthFramebuffer->getLinearDepthTexture();
    auto sourceViewport = args->_viewport;
    auto occlusionViewport = sourceViewport;

    if (!_gpuTimer) {
        _gpuTimer = std::make_shared<gpu::RangeTimer>(__FUNCTION__);
    }

    if (!_framebuffer) {
        _framebuffer = std::make_shared<AmbientOcclusionFramebuffer>();
    }
    
    if (_parametersBuffer->getResolutionLevel() > 0) {
        linearDepthTexture = linearDepthFramebuffer->getHalfLinearDepthTexture();
        occlusionViewport = occlusionViewport >> _parametersBuffer->getResolutionLevel();
    }
Example #7
void BlurGaussianDepthAware::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext, const Inputs& SourceAndDepth, gpu::FramebufferPointer& blurredFramebuffer) {
    assert(renderContext->args);
    assert(renderContext->args->hasViewFrustum());

    RenderArgs* args = renderContext->args;

    auto& sourceFramebuffer = SourceAndDepth.get0();
    auto& depthTexture = SourceAndDepth.get1();

    BlurInOutResource::Resources blurringResources;
    if (!_inOutResources.updateResources(sourceFramebuffer, blurringResources)) {
        // early exit if no valid blurring resources
        return;
    }
    
    blurredFramebuffer = blurringResources.finalFramebuffer;

    auto blurVPipeline = getBlurVPipeline();
    auto blurHPipeline = getBlurHPipeline();

    auto sourceViewport = args->_viewport;

    _parameters->setWidthHeight(sourceViewport.z, sourceViewport.w, args->_context->isStereo());
    glm::ivec2 textureSize(blurringResources.sourceTexture->getDimensions());
    _parameters->setTexcoordTransform(gpu::Framebuffer::evalSubregionTexcoordTransformCoefficients(textureSize, sourceViewport));
    _parameters->setDepthPerspective(args->getViewFrustum().getProjection()[1][1]);
    _parameters->setLinearDepthPosFar(args->getViewFrustum().getFarClip());

    gpu::doInBatch(args->_context, [=](gpu::Batch& batch) {
        batch.enableStereo(false);
        batch.setViewportTransform(sourceViewport);

        batch.setUniformBuffer(BlurTask_ParamsSlot, _parameters->_parametersBuffer);

        batch.setResourceTexture(BlurTask_DepthSlot, depthTexture);

        batch.setFramebuffer(blurringResources.blurringFramebuffer);
        // batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, glm::vec4(0.0));

        batch.setPipeline(blurVPipeline);
        batch.setResourceTexture(BlurTask_SourceSlot, blurringResources.sourceTexture);
        batch.draw(gpu::TRIANGLE_STRIP, 4);

        batch.setFramebuffer(blurringResources.finalFramebuffer);
        if (_inOutResources._generateOutputFramebuffer) {
            // batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, glm::vec4(0.0));
        }

        batch.setPipeline(blurHPipeline);
        batch.setResourceTexture(BlurTask_SourceSlot, blurringResources.blurringTexture);
        batch.draw(gpu::TRIANGLE_STRIP, 4);

        batch.setResourceTexture(BlurTask_SourceSlot, nullptr);
        batch.setResourceTexture(BlurTask_DepthSlot, nullptr);
        batch.setUniformBuffer(BlurTask_ParamsSlot, nullptr);
    });
}
Example #8
void InputsTest::test_arrange_descriptions(void)
{
   message += "test_arrange_descriptions\n";

   Inputs i;

   Vector<std::string> descriptions = i.arrange_descriptions();

   assert_true(descriptions.size() == 0, LOG);
}
void DrawLayered3D::run(const RenderContextPointer& renderContext, const Inputs& inputs) {
    assert(renderContext->args);
    assert(renderContext->args->hasViewFrustum());

    auto config = std::static_pointer_cast<Config>(renderContext->jobConfig);

    const auto& inItems = inputs.get0();
    const auto& lightingModel = inputs.get1();
    const auto jitter = inputs.get2();
    
    config->setNumDrawn((int)inItems.size());
    emit config->numDrawnChanged();

    RenderArgs* args = renderContext->args;

    // Clear the framebuffer without stereo
    // Needs to be distinct from the other batch because using the clear call 
    // while stereo is enabled triggers a warning
    if (_opaquePass) {
        gpu::doInBatch("DrawLayered3D::run::clear", args->_context, [&](gpu::Batch& batch) {
            batch.enableStereo(false);
            batch.clearFramebuffer(gpu::Framebuffer::BUFFER_DEPTH, glm::vec4(), 1.f, 0, false);
        });
    }

    if (!inItems.empty()) {
        // Render the items
        gpu::doInBatch("DrawLayered3D::main", args->_context, [&](gpu::Batch& batch) {
            args->_batch = &batch;
            batch.setViewportTransform(args->_viewport);
            batch.setStateScissorRect(args->_viewport);

            glm::mat4 projMat;
            Transform viewMat;
            args->getViewFrustum().evalProjectionMatrix(projMat);
            args->getViewFrustum().evalViewTransform(viewMat);

            batch.setProjectionTransform(projMat);
            batch.setProjectionJitter(jitter.x, jitter.y);
            batch.setViewTransform(viewMat);

            // Setup lighting model for all items;
            batch.setUniformBuffer(ru::Buffer::LightModel, lightingModel->getParametersBuffer());
            batch.setResourceTexture(ru::Texture::AmbientFresnel, lightingModel->getAmbientFresnelLUT());

            if (_opaquePass) {
                renderStateSortShapes(renderContext, _shapePlumber, inItems, _maxDrawn);
            } else {
                renderShapes(renderContext, _shapePlumber, inItems, _maxDrawn);
            }
            args->_batch = nullptr;
        });
    }
}
Example #10
void InputsTest::test_arrange_units(void)
{
   message += "test_arrange_units\n";

   Inputs i;

   Vector<std::string> units = i.arrange_units();

   assert_true(units.size() == 0, LOG);

}
Example #11
void InputsTest::test_to_XML(void)
{
   message += "test_to_XML\n";

   Inputs  i;

   tinyxml2::XMLDocument* document;

   document = i.to_XML();

   assert_true(document != NULL, LOG);

   delete document;
}
Example #12
void Game::init(Inputs inputs, shared_ptr<GameStateObserver> state_observer) {
    REQUIRE(!initialised);
    REQUIRE(inputs.size() == PLAYER_COUNT);
    this->inputs = inputs;
    this->state_observer = state_observer;
    initialised = true;
}
Example #13
Inputs Game::make_inputs(const vector<vector<Action>>& paths) {
    REQUIRE(paths.size() == PLAYER_COUNT);

    Inputs inputs;
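    // One input per player: a recorded path is replayed via PlaybackInput, an empty path falls back to ZeroInput.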
    for (unsigned int player_index = 0u; player_index < PLAYER_COUNT; ++player_index) {
        const auto& path = paths.at(player_index);
        if (!path.empty()) {
            inputs.push_back(shared_ptr<Input>(new PlaybackInput(path)));
        }
        else {
            inputs.push_back(shared_ptr<Input>(new ZeroInput));
        }
    }
    
    return inputs;
}
Example #14
void InputsTest::test_get_inputs_number(void)
{
   message += "test_get_inputs_number\n";

   Inputs i;

   // Test

   i.set();
   assert_true(i.get_inputs_number() == 0, LOG);

   // Test

   i.set(1);
   assert_true(i.get_inputs_number() == 1, LOG);
}
Example #15
void InputsTest::test_from_XML(void)
{
   message += "test_from_XML\n";

   Inputs  i;

   tinyxml2::XMLDocument* document;

   // Test

   document = i.to_XML();

   i.from_XML(*document);

   delete document;
}
Example #16
void DrawAABox::run(const render::RenderContextPointer& renderContext, const Inputs& box) {
    if (!box.isNull()) {
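        // Index pairs for the 12 edges of a box, drawn as a line list over its 8 corner vertices.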
        static const uint8_t indexData[] = {
            0, 1,
            1, 2,
            2, 3,
            3, 0,
            4, 5,
            5, 6,
            6, 7,
            7, 4,
            0, 4,
            1, 5,
            3, 7,
            2, 6
        };

        if (!_cubeMeshIndices._buffer) {
            auto indices = std::make_shared<gpu::Buffer>(sizeof(indexData), indexData);
            _cubeMeshIndices = gpu::BufferView(indices, gpu::Element(gpu::SCALAR, gpu::UINT8, gpu::INDEX));
        }

        glm::vec3 vertices[8];

        getVertices(box, vertices);

        DrawQuadVolume::run(renderContext, vertices, _cubeMeshIndices, sizeof(indexData) / sizeof(indexData[0]));
    }
}
void DrawOverlay3D::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext, const Inputs& inputs) {
    assert(renderContext->args);
    assert(renderContext->args->hasViewFrustum());

    auto config = std::static_pointer_cast<Config>(renderContext->jobConfig);

    const auto& inItems = inputs.get0();
    const auto& lightingModel = inputs.get1();
    
    config->setNumDrawn((int)inItems.size());
    emit config->numDrawnChanged();

    if (!inItems.empty()) {
        RenderArgs* args = renderContext->args;

        // Clear the framebuffer without stereo
        // Needs to be distinct from the other batch because using the clear call 
        // while stereo is enabled triggers a warning
        if (_opaquePass) {
            gpu::Batch batch;
            batch.enableStereo(false);
            batch.clearFramebuffer(gpu::Framebuffer::BUFFER_DEPTH, glm::vec4(), 1.f, 0, true);
            args->_context->render(batch);
        }

        // Render the items
        gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
            args->_batch = &batch;
            batch.setViewportTransform(args->_viewport);
            batch.setStateScissorRect(args->_viewport);

            glm::mat4 projMat;
            Transform viewMat;
            args->getViewFrustum().evalProjectionMatrix(projMat);
            args->getViewFrustum().evalViewTransform(viewMat);

            batch.setProjectionTransform(projMat);
            batch.setViewTransform(viewMat);

            // Setup lighting model for all items;
            batch.setUniformBuffer(render::ShapePipeline::Slot::LIGHTING_MODEL, lightingModel->getParametersBuffer());

            renderShapes(sceneContext, renderContext, _shapePlumber, inItems, _maxDrawn);
            args->_batch = nullptr;
        });
    }
}
Example #18
RiscOperators::Cursor::Inputs
RiscOperators::Cursor::inputs(const BaseSemantics::SValuePtr &arg1, const BaseSemantics::SValuePtr &arg2,
                              const BaseSemantics::SValuePtr &arg3)
{
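    // Arguments must be supplied left to right without gaps: once an argument is null, all later ones must be null too.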
    ASSERT_require((arg1==NULL && arg2==NULL && arg3==NULL) ||
                   (arg1!=NULL && arg2==NULL && arg3==NULL) ||
                   (arg1!=NULL && arg2!=NULL && arg3==NULL) ||
                   (arg1!=NULL && arg2!=NULL && arg3!=NULL));
    Inputs inputs;
    if (arg1!=NULL)
        inputs.push_back(SValue::promote(arg1));
    if (arg2!=NULL)
        inputs.push_back(SValue::promote(arg2));
    if (arg3!=NULL)
        inputs.push_back(SValue::promote(arg3));
    return inputs;
}
Example #19
void RenderDeferred::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext, const Inputs& inputs) {
    auto deferredTransform = inputs.get0();
    auto deferredFramebuffer = inputs.get1();
    auto lightingModel = inputs.get2();
    auto surfaceGeometryFramebuffer = inputs.get3();
    auto ssaoFramebuffer = inputs.get4();
    auto subsurfaceScatteringResource = inputs.get5();
    auto args = renderContext->args;

    gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
       _gpuTimer.begin(batch);
    });

    setupJob.run(sceneContext, renderContext, deferredTransform, deferredFramebuffer, lightingModel, surfaceGeometryFramebuffer, ssaoFramebuffer, subsurfaceScatteringResource);
    
    lightsJob.run(sceneContext, renderContext, deferredTransform, deferredFramebuffer, lightingModel);

    cleanupJob.run(sceneContext, renderContext);
    
    gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
        _gpuTimer.end(batch);
    });
    
    auto config = std::static_pointer_cast<Config>(renderContext->jobConfig);
    config->setGPUBatchRunTime(_gpuTimer.getGPUAverage(), _gpuTimer.getBatchAverage());
}
Example #20
void DrawBounds::run(const RenderContextPointer& renderContext,
    const Inputs& items) {
    RenderArgs* args = renderContext->args;

    uint32_t numItems = (uint32_t) items.size();
    if (numItems == 0) {
        return;
    }

    static const uint32_t sizeOfItemBound = sizeof(ItemBound);
    if (!_drawBuffer) {
        _drawBuffer = std::make_shared<gpu::Buffer>(sizeOfItemBound);
    }

    _drawBuffer->setData(numItems * sizeOfItemBound, (const gpu::Byte*) items.data());

    gpu::doInBatch("DrawBounds::run", args->_context, [&](gpu::Batch& batch) {
        args->_batch = &batch;

        // Setup projection
        glm::mat4 projMat;
        Transform viewMat;
        args->getViewFrustum().evalProjectionMatrix(projMat);
        args->getViewFrustum().evalViewTransform(viewMat);
        batch.setProjectionTransform(projMat);
        batch.setViewTransform(viewMat);
        batch.setModelTransform(Transform());

        // Bind program
        batch.setPipeline(getPipeline());

        glm::vec4 color(glm::vec3(0.0f), -(float) numItems);
        batch._glUniform4fv(_colorLocation, 1, (const float*)(&color));
        batch.setResourceBuffer(0, _drawBuffer);

        static const int NUM_VERTICES_PER_CUBE = 24;
        batch.draw(gpu::LINES, NUM_VERTICES_PER_CUBE * numItems, 0);
    });
}
void DrawForward::run(const RenderContextPointer& renderContext, const Inputs& inputs) {
    RenderArgs* args = renderContext->args;

    const auto& inItems = inputs.get0();
    const auto& lightingModel = inputs.get1();

    gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
        args->_batch = &batch;


        // Setup projection
        glm::mat4 projMat;
        Transform viewMat;
        args->getViewFrustum().evalProjectionMatrix(projMat);
        args->getViewFrustum().evalViewTransform(viewMat);
        batch.setProjectionTransform(projMat);
        batch.setViewTransform(viewMat);
        batch.setModelTransform(Transform());

        // Setup lighting model for all items;
        batch.setUniformBuffer(render::ShapePipeline::Slot::LIGHTING_MODEL, lightingModel->getParametersBuffer());

        // From the lighting model, define a global shape key ORed with the individual keys
        ShapeKey::Builder keyBuilder;
        if (lightingModel->isWireframeEnabled()) {
            keyBuilder.withWireframe();
        }
        ShapeKey globalKey = keyBuilder.build();
        args->_globalShapeKey = globalKey._flags.to_ulong();

        // Render items
        renderStateSortShapes(renderContext, _shapePlumber, inItems, -1, globalKey);

        args->_batch = nullptr;
        args->_globalShapeKey = 0;
    });
}
void RenderDeferred::run(const RenderContextPointer& renderContext, const Inputs& inputs) {
    auto args = renderContext->args;

    auto deferredTransform = inputs.get0();
    auto deferredFramebuffer = inputs.get1();
    auto extraRenderBuffers = inputs.get2();
        auto surfaceGeometryFramebuffer = extraRenderBuffers.get0();
        auto ssaoFramebuffer = extraRenderBuffers.get1();
        auto subsurfaceScatteringResource = extraRenderBuffers.get2();

    auto lightingModel = inputs.get3();
    auto lightClusters = inputs.get4();
    
    const auto& lightFrame = inputs.get5();
    const auto& shadowFrame = inputs.get6();
    const auto& hazeFrame = inputs.get7();

    if (!_gpuTimer) {
        _gpuTimer = std::make_shared<gpu::RangeTimer>(__FUNCTION__);
    }

    auto previousBatch = args->_batch;
    gpu::doInBatch(nullptr, args->_context, [&](gpu::Batch& batch) {
        args->_batch = &batch;
        _gpuTimer->begin(batch);

        setupJob.run(renderContext, deferredTransform, deferredFramebuffer, lightingModel, lightFrame, shadowFrame, hazeFrame, surfaceGeometryFramebuffer, ssaoFramebuffer, subsurfaceScatteringResource);

        lightsJob.run(renderContext, deferredTransform, deferredFramebuffer, lightingModel, surfaceGeometryFramebuffer, lightClusters);

        cleanupJob.run(renderContext);

        _gpuTimer->end(batch);
    });
    args->_batch = previousBatch;

    auto config = std::static_pointer_cast<Config>(renderContext->jobConfig);
    config->setGPUBatchRunTime(_gpuTimer->getGPUAverage(), _gpuTimer->getBatchAverage());
}
Example #23
void XNodeDefinition::addCalculation( CalculationFunction func,
                            const Inputs &in,
                            const Outputs &out )
  {
  Calculation calc;
  calc.func = func;
  calc.inputIDs = in;
  calc.outputIDs = out;

  XVector<InputID> inVec(in.toVector());
  XVector<OutputID> outVec(out.toVector());

  foreach(const InputID &input, inVec)
    {
    _inputMap[input] << outVec;
    }

  foreach(const OutputID &output, outVec)
    {
    _outputMap[output] << inVec;
    }

  _calculations << calc;
  }
void MyoThread::applay(const Inputs& inputs,const DataFlags& flags,ApplyCallback callback)
{
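    // Flatten each input row into one contiguous feature vector: callback(rowOffset + column, column, value),
    // where rowOffset advances by rowSize per row and the enabled flags decide which fields are emitted.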
    //size of row
    size_t count = 0;
    size_t rowSize = flags.lineSize<8>() / flags.mReps;
    //get max time
    double maxTime = inputs[inputs.size()-1].getTime();
    //for all rows
    for(const auto& row : inputs)
    {
        //index
        size_t i = 0;
        size_t irow = rowSize*count;
        //
        if(flags.mTime)
        {
            callback(i + irow, i, flags.toNormalize( row.getTime() , maxTime )); ++i;
        }
        if(flags.mGyroscope)
        {
            auto gyr = flags.apply( row.getGyroscope() );
            callback(i + irow, i, gyr.x()); ++i;
            callback(i + irow, i, gyr.y()); ++i;
            callback(i + irow, i, gyr.z()); ++i;
        }
        if(flags.mAccelerometer)
        {
            auto acc = flags.apply( row.getAccelerometer() );
            callback(i + irow, i, acc.x()); ++i;
            callback(i + irow, i, acc.y()); ++i;
            callback(i + irow, i, acc.z()); ++i;
            
        }
        if(flags.mQuaternion)
        {
            auto quad =  flags.apply( row.getQuaternion() );
            callback(i + irow, i, quad.x()); ++i;
            callback(i + irow, i, quad.y()); ++i;
            callback(i + irow, i, quad.z()); ++i;
            callback(i + irow, i, quad.w()); ++i;
        }
        if(flags.mPitch || flags.mYaw || flags.mRoll)
        {
            auto euler = row.getEulerAngles();
            
            if(flags.mPitch)
            {
                callback(i + irow, i, flags.apply( (double)euler.pitch() , (M_PI*2.0) )); ++i;
            }
            if(flags.mYaw)
            {
                callback(i + irow, i, flags.apply( (double)euler.yaw() , (M_PI*2.0) )); ++i;
            }
            if(flags.mRoll)
            {
                callback(i + irow, i, flags.apply( (double)euler.roll() , (M_PI*2.0) )); ++i;
            }
        }
        if(flags.mEmg)
        {
            for(auto emg:row.getEmg())
            {
                callback(i + irow, i,  flags.apply( ((double)emg) , 128.0 ));  ++i;
            }
        }
        //next
        ++count;
    }
}
Example #25
void Antialiasing::run(const render::RenderContextPointer& renderContext, const Inputs& inputs) {
    assert(renderContext->args);
    assert(renderContext->args->hasViewFrustum());
    
    RenderArgs* args = renderContext->args;

    auto& deferredFrameTransform = inputs.get0();
    auto& sourceBuffer = inputs.get1();
    auto& linearDepthBuffer = inputs.get2();
    auto& velocityBuffer = inputs.get3();
    
    int width = sourceBuffer->getWidth();
    int height = sourceBuffer->getHeight();

    if (_antialiasingBuffers->get(0)) {
        if (_antialiasingBuffers->get(0)->getSize() != uvec2(width, height)) {// || (sourceBuffer && (_antialiasingBuffer->getRenderBuffer(1) != sourceBuffer->getRenderBuffer(0)))) {
            _antialiasingBuffers->edit(0).reset();
            _antialiasingBuffers->edit(1).reset();
            _antialiasingTextures[0].reset();
            _antialiasingTextures[1].reset();
        }
    }

    if (!_antialiasingBuffers->get(0)) {
        // Link the antialiasing FBO to texture
        for (int i = 0; i < 2; i++) {
            auto& antiAliasingBuffer = _antialiasingBuffers->edit(i);
            antiAliasingBuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("antialiasing"));
            auto format = gpu::Element::COLOR_SRGBA_32; // DependencyManager::get<FramebufferCache>()->getLightingTexture()->getTexelFormat();
            auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR);
            _antialiasingTextures[i] = gpu::Texture::createRenderBuffer(format, width, height, gpu::Texture::SINGLE_MIP, defaultSampler);
            antiAliasingBuffer->setRenderBuffer(0, _antialiasingTextures[i]);
        }
    }
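    // The two framebuffers form the TAA history swap chain: index 0 is sampled as last frame's history
    // while index 1 is rendered to, and batch.advance() below flips them for the next frame.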
    
    gpu::doInBatch("Antialiasing::run", args->_context, [&](gpu::Batch& batch) {
        batch.enableStereo(false);
        batch.setViewportTransform(args->_viewport);

        // TAA step
        getAntialiasingPipeline();
        batch.setResourceFramebufferSwapChainTexture(AntialiasingPass_HistoryMapSlot, _antialiasingBuffers, 0);
        batch.setResourceTexture(AntialiasingPass_SourceMapSlot, sourceBuffer->getRenderBuffer(0));
        batch.setResourceTexture(AntialiasingPass_VelocityMapSlot, velocityBuffer->getVelocityTexture());
        // This is only used during debug
        batch.setResourceTexture(AntialiasingPass_DepthMapSlot, linearDepthBuffer->getLinearDepthTexture());

        batch.setUniformBuffer(AntialiasingPass_ParamsSlot, _params);
        batch.setUniformBuffer(AntialiasingPass_FrameTransformSlot, deferredFrameTransform->getFrameTransformBuffer());
        
        batch.setFramebufferSwapChain(_antialiasingBuffers, 1);
        batch.setPipeline(getAntialiasingPipeline());       
        batch.draw(gpu::TRIANGLE_STRIP, 4);

        // Blend step
        batch.setResourceTexture(AntialiasingPass_SourceMapSlot, nullptr);

        batch.setFramebuffer(sourceBuffer);
        if (_params->isDebug()) {
            batch.setPipeline(getDebugBlendPipeline());
        }  else {
            batch.setPipeline(getBlendPipeline());
            // Disable sharpen if FXAA
            batch._glUniform1f(_sharpenLoc, _sharpen * _params.get().regionInfo.z);
        }
        batch.setResourceFramebufferSwapChainTexture(AntialiasingPass_NextMapSlot, _antialiasingBuffers, 1);
        batch.draw(gpu::TRIANGLE_STRIP, 4);
        batch.advance(_antialiasingBuffers);
        
        batch.setUniformBuffer(AntialiasingPass_ParamsSlot, nullptr);
        batch.setUniformBuffer(AntialiasingPass_FrameTransformSlot, nullptr);

        batch.setResourceTexture(AntialiasingPass_DepthMapSlot, nullptr);
        batch.setResourceTexture(AntialiasingPass_HistoryMapSlot, nullptr);
        batch.setResourceTexture(AntialiasingPass_VelocityMapSlot, nullptr);
        batch.setResourceTexture(AntialiasingPass_NextMapSlot, nullptr);
    });
    
    args->popViewFrustum();
}
Example #26
 friend inline void validate(Inputs& x) { x.validate(); }
Example #27
int main(void)
{
   try            
   {
      std::cout << "OpenNN. Yacht Resistance Design Application." << std::endl;	

      srand((unsigned)time(NULL));

      // Data set

      DataSet data_set;

      data_set.set_data_file_name("../data/yachtresistance.dat");

      data_set.load_data();

      // Variables

      Variables* variables_pointer = data_set.get_variables_pointer();

      variables_pointer->set_name(0, "longitudinal_center_buoyancy");
      variables_pointer->set_name(1, "prismatic_coefficient");
      variables_pointer->set_name(2, "length_displacement_ratio");
      variables_pointer->set_name(3, "beam_draught_ratio");
      variables_pointer->set_name(4, "length_beam_ratio");
      variables_pointer->set_name(5, "froude_number");
      variables_pointer->set_name(6, "residuary_resistance");

      const Matrix<std::string> inputs_information = variables_pointer->arrange_inputs_information();
      const Matrix<std::string> targets_information = variables_pointer->arrange_targets_information();

      // Instances

      Instances* instances_pointer = data_set.get_instances_pointer();

      instances_pointer->split_random_indices();

      const Vector< Statistics<double> > inputs_statistics = data_set.scale_inputs_minimum_maximum();
      const Vector< Statistics<double> > targets_statistics = data_set.scale_targets_minimum_maximum();

      // Neural network

      const size_t inputs_number = data_set.get_variables().count_inputs_number();
      const size_t hidden_neurons_number = 30;
      const size_t outputs_number = data_set.get_variables().count_targets_number();

      NeuralNetwork neural_network(inputs_number, hidden_neurons_number, outputs_number);

      Inputs* inputs = neural_network.get_inputs_pointer();

      inputs->set_information(inputs_information);

      Outputs* outputs = neural_network.get_outputs_pointer();

      outputs->set_information(targets_information);

      neural_network.construct_scaling_layer();

      ScalingLayer* scaling_layer_pointer = neural_network.get_scaling_layer_pointer();

      scaling_layer_pointer->set_statistics(inputs_statistics);

      scaling_layer_pointer->set_scaling_method(ScalingLayer::NoScaling);

      neural_network.construct_unscaling_layer();

      UnscalingLayer* unscaling_layer_pointer = neural_network.get_unscaling_layer_pointer();

      unscaling_layer_pointer->set_statistics(targets_statistics);

      unscaling_layer_pointer->set_unscaling_method(UnscalingLayer::NoUnscaling);

      // Performance functional

      PerformanceFunctional performance_functional(&neural_network, &data_set);

      // Training strategy

      TrainingStrategy training_strategy(&performance_functional);

      QuasiNewtonMethod* quasi_Newton_method_pointer = training_strategy.get_quasi_Newton_method_pointer();

      quasi_Newton_method_pointer->set_maximum_iterations_number(1000);

      quasi_Newton_method_pointer->set_reserve_performance_history(true);

      quasi_Newton_method_pointer->set_display_period(100);

      TrainingStrategy::Results training_strategy_results = training_strategy.perform_training();

      // Testing analysis
                  
      TestingAnalysis testing_analysis(&neural_network, &data_set);

      TestingAnalysis::LinearRegressionResults linear_regression_results = testing_analysis.perform_linear_regression_analysis();

      // Save results

      scaling_layer_pointer->set_scaling_method(ScalingLayer::MinimumMaximum);
      unscaling_layer_pointer->set_unscaling_method(UnscalingLayer::MinimumMaximum);

      data_set.save("../data/data_set.xml");

      neural_network.save("../data/neural_network.xml");
      neural_network.save_expression("../data/expression.txt");

      training_strategy.save("../data/training_strategy.xml");
      training_strategy_results.save("../data/training_strategy_results.dat");

      linear_regression_results.save("../data/linear_regression_analysis_results.dat");

      return(0);
   }
   catch(std::exception& e)
   {
      std::cerr << e.what() << std::endl;

      return(1);
   }
}  
Example #28
int main(void)
{
   try
   {
      std::cout << "OpenNN. Airfoil Self-Noise Application." << std::endl;

      srand((unsigned)time(NULL));

      // Data set

      DataSet data_set;

#ifdef __APPLE__
      data_set.set_data_file_name("../../../../data/airfoil_self_noise.dat");
#else
      data_set.set_data_file_name("../data/airfoil_self_noise.dat");
#endif

      data_set.set_separator("Tab");

      data_set.load_data();

      // Variables

      Variables* variables_pointer = data_set.get_variables_pointer();

      Vector< Variables::Item > variables_items(6);

      variables_items[0].name = "frequency";
      variables_items[0].units = "hertzs";
      variables_items[0].use = Variables::Input;

      variables_items[1].name = "angle_of_attack";
      variables_items[1].units = "degrees";
      variables_items[1].use = Variables::Input;

      variables_items[2].name = "chord_length";
      variables_items[2].units = "meters";
      variables_items[2].use = Variables::Input;

      variables_items[3].name = "free_stream_velocity";
      variables_items[3].units = "meters per second";
      variables_items[3].use = Variables::Input;

      variables_items[4].name = "suction_side_displacement_thickness";
      variables_items[4].units = "meters";
      variables_items[4].use = Variables::Input;

      variables_items[5].name = "scaled_sound_pressure_level";
      variables_items[5].units = "decibels";
      variables_items[5].use = Variables::Target;

      variables_pointer->set_items(variables_items);

      const Matrix<std::string> inputs_information = variables_pointer->arrange_inputs_information();
      const Matrix<std::string> targets_information = variables_pointer->arrange_targets_information();

      // Instances

      Instances* instances_pointer = data_set.get_instances_pointer();

      instances_pointer->split_random_indices();

      const Vector< Statistics<double> > inputs_statistics = data_set.scale_inputs_minimum_maximum();
      const Vector< Statistics<double> > targets_statistics = data_set.scale_targets_minimum_maximum();

      // Neural network

      const size_t inputs_number = variables_pointer->count_inputs_number();
      const size_t hidden_perceptrons_number = 9;
      const size_t outputs_number = variables_pointer->count_targets_number();

      NeuralNetwork neural_network(inputs_number, hidden_perceptrons_number, outputs_number);

      Inputs* inputs = neural_network.get_inputs_pointer();

      inputs->set_information(inputs_information);

      Outputs* outputs = neural_network.get_outputs_pointer();

      outputs->set_information(targets_information);

      neural_network.construct_scaling_layer();

      ScalingLayer* scaling_layer_pointer = neural_network.get_scaling_layer_pointer();

      scaling_layer_pointer->set_statistics(inputs_statistics);

      scaling_layer_pointer->set_scaling_method(ScalingLayer::NoScaling);

      neural_network.construct_unscaling_layer();

      UnscalingLayer* unscaling_layer_pointer = neural_network.get_unscaling_layer_pointer();

      unscaling_layer_pointer->set_statistics(targets_statistics);

      unscaling_layer_pointer->set_unscaling_method(UnscalingLayer::NoUnscaling);

      // Performance functional

      PerformanceFunctional performance_functional(&neural_network, &data_set);

      performance_functional.set_regularization_type(PerformanceFunctional::NEURAL_PARAMETERS_NORM_REGULARIZATION);

      // Training strategy object

      TrainingStrategy training_strategy(&performance_functional);

      QuasiNewtonMethod* quasi_Newton_method_pointer = training_strategy.get_quasi_Newton_method_pointer();

      quasi_Newton_method_pointer->set_maximum_iterations_number(1000);
      quasi_Newton_method_pointer->set_display_period(10);

      quasi_Newton_method_pointer->set_minimum_performance_increase(1.0e-6);

      quasi_Newton_method_pointer->set_reserve_performance_history(true);

      TrainingStrategy::Results training_strategy_results = training_strategy.perform_training();

      // Testing analysis

      TestingAnalysis testing_analysis(&neural_network, &data_set);

      TestingAnalysis::LinearRegressionResults linear_regression_results = testing_analysis.perform_linear_regression_analysis();

      // Save results

      scaling_layer_pointer->set_scaling_method(ScalingLayer::MinimumMaximum);
      unscaling_layer_pointer->set_unscaling_method(UnscalingLayer::MinimumMaximum);

#ifdef __APPLE__
      data_set.save("../../../../data/data_set.xml");

      neural_network.save("../../../../data/neural_network.xml");
      neural_network.save_expression("../../../../data/expression.txt");

      performance_functional.save("../../../../data/performance_functional.xml");

      training_strategy.save("../../../../data/training_strategy.xml");
      training_strategy_results.save("../../../../data/training_strategy_results.dat");

      linear_regression_results.save("../../../../data/linear_regression_analysis_results.dat");
#else
      data_set.save("../data/data_set.xml");

      neural_network.save("../data/neural_network.xml");
      neural_network.save_expression("../data/expression.txt");

      performance_functional.save("../data/performance_functional.xml");

      training_strategy.save("../data/training_strategy.xml");
      training_strategy_results.save("../data/training_strategy_results.dat");

      linear_regression_results.save("../data/linear_regression_analysis_results.dat");
#endif

      return(0);
   }
   catch(std::exception& e)
   {
      std::cerr << e.what() << std::endl;

      return(1);
   }
}
Example #29
void ToneMappingDeferred::run(const render::RenderContextPointer& renderContext, const Inputs& inputs) {

    auto lightingBuffer = inputs.get0()->getRenderBuffer(0);
    auto destFbo = inputs.get1();
    _toneMappingEffect.render(renderContext->args, lightingBuffer, destFbo);
}
Example #30
void DebugLightClusters::run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const Inputs& inputs) {
    if (!(doDrawClusterFromDepth || doDrawContent || doDrawGrid)) {
        return;
    }

    auto deferredTransform = inputs.get0();
    auto deferredFramebuffer = inputs.get1();
    auto lightingModel = inputs.get2();
    auto linearDepthTarget = inputs.get3();
    auto lightClusters = inputs.get4();

    auto args = renderContext->args;

    gpu::Batch batch;

    batch.enableStereo(false);


    // Assign the camera transform
    batch.setViewportTransform(args->_viewport);
    glm::mat4 projMat;
    Transform viewMat;
    args->getViewFrustum().evalProjectionMatrix(projMat);
    args->getViewFrustum().evalViewTransform(viewMat);
    batch.setProjectionTransform(projMat);
    batch.setViewTransform(viewMat, true);


    // Then the actual ClusterGrid attributes
    batch.setModelTransform(Transform());

    // Bind the Light Cluster data structure
    batch.setUniformBuffer(LIGHT_GPU_SLOT, lightClusters->_lightStage->_lightArrayBuffer);
    batch.setUniformBuffer(LIGHT_CLUSTER_GRID_FRUSTUM_GRID_SLOT, lightClusters->_frustumGridBuffer);
    batch.setUniformBuffer(LIGHT_CLUSTER_GRID_CLUSTER_GRID_SLOT, lightClusters->_clusterGridBuffer);
    batch.setUniformBuffer(LIGHT_CLUSTER_GRID_CLUSTER_CONTENT_SLOT, lightClusters->_clusterContentBuffer);



    if (doDrawClusterFromDepth) {
        batch.setPipeline(getDrawClusterFromDepthPipeline());
        batch.setUniformBuffer(DEFERRED_FRAME_TRANSFORM_BUFFER_SLOT, deferredTransform->getFrameTransformBuffer());

        if (linearDepthTarget) {
            batch.setResourceTexture(DEFERRED_BUFFER_LINEAR_DEPTH_UNIT, linearDepthTarget->getLinearDepthTexture());
        }

        batch.draw(gpu::TRIANGLE_STRIP, 4, 0);
              
        batch.setResourceTexture(DEFERRED_BUFFER_LINEAR_DEPTH_UNIT, nullptr);
        batch.setUniformBuffer(DEFERRED_FRAME_TRANSFORM_BUFFER_SLOT, nullptr);
    }

    if (doDrawContent) {

        // bind the one gpu::Pipeline we need
        batch.setPipeline(getDrawClusterContentPipeline());
        batch.setUniformBuffer(DEFERRED_FRAME_TRANSFORM_BUFFER_SLOT, deferredTransform->getFrameTransformBuffer());

        if (linearDepthTarget) {
            batch.setResourceTexture(DEFERRED_BUFFER_LINEAR_DEPTH_UNIT, linearDepthTarget->getLinearDepthTexture());
        }

        batch.draw(gpu::TRIANGLE_STRIP, 4, 0);
              
        batch.setResourceTexture(DEFERRED_BUFFER_LINEAR_DEPTH_UNIT, nullptr);
        batch.setUniformBuffer(DEFERRED_FRAME_TRANSFORM_BUFFER_SLOT, nullptr);
    }



    gpu::Batch drawGridAndCleanBatch;

    if (doDrawGrid) {
        // bind the one gpu::Pipeline we need
        drawGridAndCleanBatch.setPipeline(getDrawClusterGridPipeline());

        auto dims = lightClusters->_frustumGridBuffer->dims;
        glm::ivec3 summedDims(dims.x*dims.y * dims.z, dims.x*dims.y, dims.x);
        drawGridAndCleanBatch.drawInstanced(summedDims.x, gpu::LINES, 24, 0);
    }

    drawGridAndCleanBatch.setUniformBuffer(LIGHT_GPU_SLOT, nullptr);
    drawGridAndCleanBatch.setUniformBuffer(LIGHT_CLUSTER_GRID_FRUSTUM_GRID_SLOT, nullptr);
    drawGridAndCleanBatch.setUniformBuffer(LIGHT_CLUSTER_GRID_CLUSTER_GRID_SLOT, nullptr);
    drawGridAndCleanBatch.setUniformBuffer(LIGHT_CLUSTER_GRID_CLUSTER_CONTENT_SLOT, nullptr);

    drawGridAndCleanBatch.setResourceTexture(DEFERRED_BUFFER_COLOR_UNIT, nullptr);
    drawGridAndCleanBatch.setResourceTexture(DEFERRED_BUFFER_NORMAL_UNIT, nullptr);
    drawGridAndCleanBatch.setResourceTexture(DEFERRED_BUFFER_EMISSIVE_UNIT, nullptr);
    drawGridAndCleanBatch.setResourceTexture(DEFERRED_BUFFER_DEPTH_UNIT, nullptr);

    args->_context->appendFrameBatch(batch);
    args->_context->appendFrameBatch(drawGridAndCleanBatch);
}