void SliceExtractor::renderImageImpl(DataContainer& dataContainer, const ImageRepresentationGL::ScopedRepresentation& img) {
        // Renders one slice of the input volume (plus optional crosshair/geometry
        // overlays) into the target render data referenced by p_targetImageID.

        // prepare OpenGL: bind the volume and its transfer function to dedicated texture units
        _shader->activate();
        cgt::TextureUnit inputUnit, tfUnit;
        img->bind(_shader, inputUnit);
        p_transferFunction.getTF()->bind(_shader, tfUnit);

        cgt::mat4 identity = cgt::mat4::identity;

        _shader->setUniform("_texCoordsMatrix", _texCoordMatrix);
        _shader->setUniform("_modelMatrix", identity);
        _shader->setUniform("_viewMatrix", _viewMatrix);
        _shader->setUniform("_projectionMatrix", identity);
        _shader->setUniform("_useTexturing", true);
        _shader->setUniform("_useSolidColor", true);

        // render slice
        // FIX: "FramebufferActivationGuard f*g(this);" was a syntax error ('*' is not
        // valid inside an identifier); the guard activates the FBO for this scope.
        FramebufferActivationGuard fag(this);
        createAndAttachColorTexture();
        createAndAttachDepthTexture();
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
        QuadRdr.renderQuad();

        // optional overlays drawn on top of the slice
        if (p_renderCrosshair.getValue())
            renderCrosshair(img);

        renderGeometry(dataContainer, img);

        _shader->deactivate();
        cgt::TextureUnit::setZeroUnit();

        dataContainer.addData(p_targetImageID.getValue(), new RenderData(_fbo));
    }
    void MultiVolumeRaycaster::updateResult(DataContainer& dataContainer) {
        // Fetch all inputs from the DataContainer.
        ImageRepresentationGL::ScopedRepresentation image1(dataContainer, p_sourceImage1.getValue());
        ImageRepresentationGL::ScopedRepresentation image2(dataContainer, p_sourceImage2.getValue());
        ImageRepresentationGL::ScopedRepresentation image3(dataContainer, p_sourceImage3.getValue());
        ScopedTypedData<CameraData> camera(dataContainer, p_camera.getValue());
        ScopedTypedData<RenderData> geometryImage(dataContainer, p_geometryImageId.getValue(), true);
        ScopedTypedData<LightSourceData> light(dataContainer, p_lightId.getValue());

        // Collect the available volumes; for each one whose voxel hierarchy was
        // flagged invalid, rebuild the hierarchy and clear the corresponding flag.
        std::vector<const ImageRepresentationGL*> inputVolumes;
        if (image1) {
            inputVolumes.push_back(image1);
            if (getInvalidationLevel() & INVALID_VOXEL_HIERARCHY1) {
                _vhm1->createHierarchy(image1, p_transferFunction1.getTF());
                validate(INVALID_VOXEL_HIERARCHY1);
            }
        }
        if (image2) {
            inputVolumes.push_back(image2);
            if (getInvalidationLevel() & INVALID_VOXEL_HIERARCHY2) {
                _vhm2->createHierarchy(image2, p_transferFunction2.getTF());
                validate(INVALID_VOXEL_HIERARCHY2);
            }
        }
        if (image3) {
            inputVolumes.push_back(image3);
            if (getInvalidationLevel() & INVALID_VOXEL_HIERARCHY3) {
                _vhm3->createHierarchy(image3, p_transferFunction3.getTF());
                validate(INVALID_VOXEL_HIERARCHY3);
            }
        }

        // Raycasting needs all three volumes and a camera; bail out otherwise.
        if (inputVolumes.size() < 3 || camera == nullptr) {
            LDEBUG("No suitable input data found!");
            return;
        }

        // Generate entry/exit point textures, then perform the actual raycasting.
        auto entryExitPoints = computeEntryExitPoints(inputVolumes, camera, geometryImage);
        dataContainer.addData(p_outputImageId.getValue() + ".entrypoints", entryExitPoints.first);
        dataContainer.addData(p_outputImageId.getValue() + ".exitpoints", entryExitPoints.second);

        auto raycastResult = performRaycasting(dataContainer, inputVolumes, camera, entryExitPoints.first, entryExitPoints.second, light);
        dataContainer.addData(p_outputImageId.getValue(), raycastResult);
    }
void FiberReader::updateResult(DataContainer& dataContainer) {
    // Loads a fiber file into the DataContainer. Only TrackVis (.trk) files are
    // handled; any other extension is reported as an error.
    const std::string& fileName = p_url.getValue();
    if (cgt::FileSystem::fileExtension(fileName) != "trk") {
        LERROR("Unknown file extension.");
        return;
    }

    dataContainer.addData(p_outputId.getValue(), readTrkFile(fileName));
}
    void RawImageReader::updateResult(DataContainer& data) {
        // Derive the dimensionality from trailing singleton dimensions:
        // z > 1 => 3D; otherwise y > 1 => 2D; otherwise 1D.
        size_t dimensionality;
        if (p_size.getValue().z != 1)
            dimensionality = 3;
        else if (p_size.getValue().y != 1)
            dimensionality = 2;
        else
            dimensionality = 1;

        // Create the image with a disk-backed representation, set its world
        // mapping, and publish it under the target ID.
        ImageData* outputImage = new ImageData(dimensionality, p_size.getValue(), p_numChannels.getValue());
        ImageRepresentationDisk::create(outputImage, p_url.getValue(), p_baseType.getOptionValue(), p_offset.getValue(), p_endianness.getOptionValue());
        outputImage->setMappingInformation(ImageMappingInformation(p_size.getValue(), p_imageOffset.getValue(), p_voxelSize.getValue()));
        data.addData(p_targetImageID.getValue(), outputImage);
    }
    void TextRenderer::updateResult(DataContainer& data) {
        // Renders p_text via the font atlas into a fresh RenderData.
        // Nothing to do without an atlas.
        if (_atlas == nullptr)
            return;

        // FIX: "FramebufferActivationGuard f*g(this);" did not compile ('*' is not
        // valid inside an identifier); the guard activates the FBO for this scope.
        FramebufferActivationGuard fag(this);
        createAndAttachColorTexture();
        createAndAttachDepthTexture();

        // Map pixel coordinates to normalized device coordinates ([-1,1]); the y
        // coordinate is flipped so p_position is interpreted top-down.
        const cgt::mat4 trafoMatrix = cgt::mat4::createTranslation(cgt::vec3(-1.f, -1.f, 0.f)) * cgt::mat4::createScale(cgt::vec3(2.f / _viewportSizeProperty->getValue().x, 2.f / _viewportSizeProperty->getValue().y, 1.f));
        cgt::vec2 pos(static_cast<float>(p_position.getValue().x), static_cast<float>(_viewportSizeProperty->getValue().y - p_position.getValue().y));
        _atlas->renderText(p_text.getValue(), pos, p_color.getValue(), trafoMatrix);

        data.addData(p_outputImage.getValue(), new RenderData(_fbo));
    }
void GlGradientVolumeGenerator::updateResult(DataContainer& data) {
    // Computes a gradient volume from the input image on the GPU by rendering
    // every z-slice through the gradient shader into a new RGB16F 3D texture.
    ImageRepresentationGL::ScopedRepresentation img(data, p_inputImage.getValue());

    if (img != 0) {
        const cgt::svec3& size = img->getSize();

        cgt::TextureUnit inputUnit;
        inputUnit.activate();

        // create texture for result
        // NOTE(review): resultTexture is handed to ImageRepresentationGL::create()
        // below and not deleted here -- presumably the representation takes
        // ownership; confirm against ImageRepresentationGL.
        cgt::Texture* resultTexture = new cgt::Texture(GL_TEXTURE_3D, cgt::ivec3(size), GL_RGB16F, cgt::Texture::LINEAR);

        // activate shader and bind textures
        _shader->activate();
        img->bind(_shader, inputUnit);

        // activate FBO and attach texture
        _fbo->activate();
        glViewport(0, 0, static_cast<GLsizei>(size.x), static_cast<GLsizei>(size.y));

        // render quad to compute difference measure by shader
        // One full-screen pass per slice: _zTexCoord addresses the center of
        // slice z in texture space, and layer z of the 3D texture is attached
        // as the render target.
        for (int z = 0; z < static_cast<int>(size.z); ++z) {
            float zTexCoord = static_cast<float>(z)/static_cast<float>(size.z) + .5f/static_cast<float>(size.z);
            _shader->setUniform("_zTexCoord", zTexCoord);
            _fbo->attachTexture(resultTexture, GL_COLOR_ATTACHMENT0, 0, z);
            QuadRdr.renderQuad();
        }
        _fbo->detachAll();
        _fbo->deactivate();
        _shader->deactivate();

        // put resulting image into DataContainer (3 channels for the 3 gradient components)
        ImageData* id = new ImageData(3, size, 3);
        ImageRepresentationGL::create(id, resultTexture);
        id->setMappingInformation(img->getParent()->getMappingInformation());
        data.addData(p_outputImage.getValue(), id);

        cgt::TextureUnit::setZeroUnit();
        LGL_ERROR;
    }
    else {
        LDEBUG("No suitable input image found.");
    }
}
    void CudaConfidenceMapsSolver::updateResult(DataContainer& data) {
        // Computes a confidence map for the input ultrasound image: the image is
        // uploaded to the GPU solver, an equation system is generated from it and
        // solved with conjugate gradient, and the solution is published as a new
        // single-channel float image.
        ImageRepresentationLocal::ScopedRepresentation img(data, p_inputImage.getValue());
        if (img != 0) {
            bool use8Neighbourhood = p_use8Neighbourhood.getValue();
            float gradientScaling = p_gradientScaling.getValue();
            float alpha = p_paramAlpha.getValue();
            float beta = p_paramBeta.getValue();
            float gamma = p_paramGamma.getValue();

            // Setup the solver with the current Alpha-Beta-Filter settings
            _solver.enableAlphaBetaFilter(p_useAlphaBetaFilter.getValue());
            _solver.setAlphaBetaFilterParameters(p_filterAlpha.getValue(), p_filterBeta.getValue());

            cgt::ivec3 size = img->getSize();
            // FIX: replaced the C-style cast with static_cast on the weakly typed
            // (void*) pointer.
            // NOTE(review): this assumes the input image stores 8-bit data -- the
            // code does not check the base type; confirm upstream guarantees.
            auto image = static_cast<unsigned char*>(img->getWeaklyTypedPointer()._pointer);

            // Copy the image on the GPU and generate the equation system
            _solver.uploadImage(image, size.x, size.y, gradientScaling, alpha, beta, gamma, use8Neighbourhood);

            // Solve the equation system using Conjugate Gradient
            if (p_useFixedIterationCount.getValue()) {
                _solver.solveWithFixedIterationCount(p_iterationBudget.getValue());
            }
            else {
                _solver.solveWithFixedTimeBudget(p_millisecondBudget.getValue());
            }

            const float *solution = _solver.getSolution(size.x, size.y);

            // FIXME: Instead of copying the solution to a local representation first it would make
            // sense to directly create an opengl representation!
            ImageData *id = new ImageData(img->getParent()->getDimensionality(), size, img->getParent()->getNumChannels());
            cgt::Texture* resultTexture = new cgt::Texture(GL_TEXTURE_2D, size, GL_R32F, cgt::Texture::LINEAR);
            resultTexture->setWrapping(cgt::Texture::MIRRORED_REPEAT);
            resultTexture->uploadTexture(reinterpret_cast<const GLubyte*>(solution), GL_RED, GL_FLOAT);
            ImageRepresentationGL::create(id, resultTexture);
            id->setMappingInformation(img->getParent()->getMappingInformation());
            data.addData(p_outputConfidenceMap.getValue(), id);
        }
    }
    void ViewportSplitter::render(DataContainer& dataContainer) {
        // Composites the sub-view render results into a single output image
        // covering the full viewport, laid out horizontally or vertically
        // depending on _splitMode.
        cgt::vec2 vps(p_viewportSizeProperty->getValue());
        cgt::vec2 evps(p_subViewViewportSize.getValue());

        cgt::TextureUnit rtUnit, colorUnit, depthUnit;
        rtUnit.activate();
        // target texture for the composed image
        // NOTE(review): tex is attached to _fbo and wrapped into the RenderData
        // below -- presumably RenderData/_fbo manage its lifetime; confirm.
        cgt::Texture* tex = new cgt::Texture(GL_TEXTURE_2D, cgt::ivec3(p_viewportSizeProperty->getValue(), 1), GL_RGBA8, cgt::Texture::LINEAR);
        tex->setWrapping(cgt::Texture::CLAMP_TO_EDGE);

        _fbo->activate();
        _fbo->attachTexture(tex, GL_COLOR_ATTACHMENT0);
        glViewport(0, 0, static_cast<GLsizei>(vps.x), static_cast<GLsizei>(vps.y));

        _copyShader->activate();
        // orthographic projection in pixel coordinates with y pointing down
        _copyShader->setUniform("_projectionMatrix", cgt::mat4::createOrtho(0, vps.x, vps.y, 0, -1, 1));
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

        // copy each available sub-view image into its sub-rectangle
        for (size_t i = 0; i < _numSubViews; ++i) {
            if (p_inputImageIds[i] != nullptr) {
                ScopedTypedData<RenderData> rd(dataContainer, p_inputImageIds[i]->getValue());
                if (rd != nullptr) {
                    rd->bind(_copyShader, colorUnit, depthUnit);

                    // scale the unit quad to the sub-view size, then translate it
                    // to slot i (VERTICAL stacks top-to-bottom, hence the flip)
                    _copyShader->setUniform("_modelMatrix", cgt::mat4::createScale(cgt::vec3(evps.x, evps.y, .5f)));
                    if (_splitMode == HORIZONTAL)
                        _copyShader->setUniform("_viewMatrix", cgt::mat4::createTranslation(cgt::vec3(float(i) * evps.x, 0.f, 0.f)));
                    else if (_splitMode == VERTICAL)
                        _copyShader->setUniform("_viewMatrix", cgt::mat4::createTranslation(cgt::vec3(0.f, float(_numSubViews - i - 1) * evps.y, 0.f)));

                    _quad->render(GL_TRIANGLE_FAN);
                }
            }
        }

        _copyShader->deactivate();
        dataContainer.addData(p_outputImageId.getValue(), new RenderData(_fbo));

        _fbo->detachAll();
        _fbo->deactivate();
    }
    void SimpleRaycaster::processImpl(DataContainer& data, ImageRepresentationGL::ScopedRepresentation& image) {
        // Performs the raycasting pass into a multi-target FBO (one RGBA8 color
        // target, two RGBA32F auxiliary targets, plus depth) and publishes the
        // result under p_targetImageID.
        ScopedTypedData<LightSourceData> light(data, p_lightId.getValue());

        // proceed when shading is off, or when a light source is available
        if (p_enableShading.getValue() == false || light != nullptr) {
            // FIX: "FramebufferActivationGuard f*g(this);" was not valid C++ ('*'
            // inside an identifier); the guard activates the FBO for this scope.
            FramebufferActivationGuard fag(this);
            createAndAttachTexture(GL_RGBA8);
            createAndAttachTexture(GL_RGBA32F);
            createAndAttachTexture(GL_RGBA32F);
            createAndAttachDepthTexture();

            static const GLenum buffers[] = { GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1 , GL_COLOR_ATTACHMENT2 };
            glDrawBuffers(3, buffers);

            if (p_enableShading.getValue() && light != nullptr) {
                light->bind(_shader, "_lightSource");
            }
            if (p_enableShadowing.getValue()) {
                _shader->setUniform("_shadowIntensity", p_shadowIntensity.getValue());
            }

            glEnable(GL_DEPTH_TEST);
            glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
            QuadRdr.renderQuad();

            // restore state: back to a single draw buffer, depth test off
            glDrawBuffers(1, buffers);
            glDisable(GL_DEPTH_TEST);
            LGL_ERROR;

            data.addData(p_targetImageID.getValue(), new RenderData(_fbo));
        }
        else {
            LDEBUG("Could not load light source from DataContainer.");
        }
    }
    void VirtualMirrorCombine::updateResult(DataContainer& data) {
        // Combines the normal rendering with the mirror rendering, using the
        // rendered mirror geometry's depth image as additional input.
        ScopedTypedData<RenderData> normalImage(data, p_normalImageID.getValue());
        ScopedTypedData<RenderData> mirrorImage(data, p_mirrorImageID.getValue());
        ScopedTypedData<RenderData> mirrorRendered(data, p_mirrorRenderID.getValue());

        // compare against nullptr for consistency with the other processors
        if (normalImage != nullptr && mirrorImage != nullptr && mirrorRendered != nullptr) {
            // always pass the depth test so the shader fully controls the output depth
            glEnable(GL_DEPTH_TEST);
            glDepthFunc(GL_ALWAYS);

            // FIX: "FramebufferActivationGuard f*g(this);" was not valid C++ ('*'
            // inside an identifier); the guard activates the FBO for this scope.
            FramebufferActivationGuard fag(this);
            createAndAttachColorTexture();
            createAndAttachDepthTexture();

            _shader->activate();
            decorateRenderProlog(data, _shader);

            cgt::TextureUnit normalColorUnit, normalDepthUnit, mirrorColorUnit, mirrorDepthUnit, mirrorRenderedDepthUnit;
            normalImage->bind(_shader, normalColorUnit, normalDepthUnit, "_normalColor", "_normalDepth", "_normalTexParams");
            mirrorImage->bind(_shader, mirrorColorUnit, mirrorDepthUnit, "_mirrorColor", "_mirrorDepth", "_mirrorTexParams");
            mirrorRendered->bindDepthTexture(_shader, mirrorRenderedDepthUnit, "_mirrorRenderedDepth", "_mirrorRenderedTexParams");

            glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
            QuadRdr.renderQuad();

            // restore default GL state
            _shader->deactivate();
            cgt::TextureUnit::setZeroUnit();
            glDepthFunc(GL_LESS);
            glDisable(GL_DEPTH_TEST);
            LGL_ERROR;

            data.addData(p_targetImageID.getValue(), new RenderData(_fbo));
        }
        else {
            LDEBUG("No suitable input images found.");
        }
    }
    void ItkReader::ReadImageDirect(DataContainer& data) {
        // Reads a single image file through ITK's ImageIO factory and stores it
        // as an ImageData (with a local representation) in the DataContainer.
        // DOUBLE data is converted to float (with a precision-loss warning).
        typedef itk::ImageIOBase::IOComponentType ScalarPixelType;

        itk::ImageIOBase::Pointer imageIO =
            itk::ImageIOFactory::CreateImageIO(p_url.getValue().c_str(), itk::ImageIOFactory::ReadMode);

        if (imageIO.IsNotNull())
        {
            WeaklyTypedPointer wtp;

            imageIO->SetFileName(p_url.getValue());
            imageIO->ReadImageInformation();

            const ScalarPixelType pixelType = imageIO->GetComponentType();
            const size_t numDimensions = imageIO->GetNumberOfDimensions();

            LDEBUG("Reading Image with Reader " << imageIO->GetNameOfClass());
            LDEBUG("Pixel Type is " << imageIO->GetComponentTypeAsString(pixelType));
            LDEBUG("numDimensions: " << numDimensions);

            if (numDimensions > 3) {
                LERROR("Error: Dimensions higher than 3 not supported!");
                return;
            }

            itk::ImageIORegion ioRegion(numDimensions);
            itk::ImageIORegion::IndexType ioStart = ioRegion.GetIndex();
            itk::ImageIORegion::SizeType ioSize = ioRegion.GetSize();

            cgt::vec3 imageOffset(0.f);
            cgt::vec3 voxelSize(1.f);
            cgt::ivec3 size_i(1);

            // we assured above that numDimensions is <= 3
            for (int i = 0; i < static_cast<int>(numDimensions); i++) {
                size_i[i] = imageIO->GetDimensions(i);
                imageOffset[i] = imageIO->GetOrigin(i);
                voxelSize[i] = imageIO->GetSpacing(i);
                ioStart[i] = 0;
                ioSize[i] = size_i[i];
            }

            cgt::svec3 size(size_i);
            // collapse trailing singleton dimensions: z==1 -> 2D, y==1 too -> 1D
            size_t dimensionality = (size_i[2] == 1) ? ((size_i[1] == 1) ? 1 : 2) : 3;

            LDEBUG("Image Size is " << size);
            LDEBUG("Voxel Size is " << voxelSize);
            LDEBUG("Image Offset is " << imageOffset);
            LDEBUG("component size: " << imageIO->GetComponentSize());
            LDEBUG("components: " << imageIO->GetNumberOfComponents());
            LDEBUG("pixel type (string): " << imageIO->GetPixelTypeAsString(imageIO->GetPixelType())); // 'vector'
            LDEBUG("pixel type: " << imageIO->GetPixelType()); // '5'

            // map the ITK component type to the internal weakly typed base type
            switch (pixelType) {
            case itk::ImageIOBase::CHAR:
                wtp._baseType = WeaklyTypedPointer::INT8; break;
            case itk::ImageIOBase::UCHAR:
                wtp._baseType = WeaklyTypedPointer::UINT8; break;
            case itk::ImageIOBase::SHORT:
                wtp._baseType = WeaklyTypedPointer::INT16; break;
            case itk::ImageIOBase::USHORT:
                wtp._baseType = WeaklyTypedPointer::UINT16; break;
            case itk::ImageIOBase::INT:
                wtp._baseType = WeaklyTypedPointer::INT32; break;
            case itk::ImageIOBase::UINT:
                wtp._baseType = WeaklyTypedPointer::UINT32; break;
            case itk::ImageIOBase::DOUBLE:
                LWARNING("Pixel Type is DOUBLE. Conversion to float may result in loss of precision!");
                // intentional fall through: DOUBLE data is read and converted to FLOAT below
            case itk::ImageIOBase::FLOAT:
                wtp._baseType = WeaklyTypedPointer::FLOAT; break;
            default:
                LERROR("Error while loading ITK image: unsupported type: " << pixelType);
                return;
            }

            wtp._numChannels = imageIO->GetNumberOfComponents();

            //Setup the image region to read
            ioRegion.SetIndex(ioStart);
            ioRegion.SetSize(ioSize);
            imageIO->SetIORegion(ioRegion);

            if (pixelType != itk::ImageIOBase::DOUBLE) {
                //Finally, allocate buffer and read the image data
                wtp._pointer = new uint8_t[imageIO->GetImageSizeInBytes()];
                imageIO->Read(wtp._pointer);
            }
            else {
                // convert double volume to float (DOUBLE is not supported internally)
                double * inputBuf = new double[imageIO->GetImageSizeInComponents()];
                wtp._pointer = new uint8_t[imageIO->GetImageSizeInComponents() * sizeof(float)];
                imageIO->Read(inputBuf);

                double * dptr = inputBuf;
                float * fptr = static_cast<float*>(wtp._pointer);
                for (int i = 0, s = imageIO->GetImageSizeInComponents(); i < s; ++i) {
                    // FIX: made the narrowing double->float conversion explicit
                    // (matches the series reader below)
                    *fptr = static_cast<float>(*dptr);
                    fptr++;
                    dptr++;
                }
                delete[] inputBuf;
            }

            ImageData* image = new ImageData(dimensionality, size, wtp._numChannels);
            ImageRepresentationLocal::create(image, wtp);

            image->setMappingInformation(ImageMappingInformation(size, imageOffset/* + p_imageOffset.getValue()*/, voxelSize /** p_voxelSize.getValue()*/));
            data.addData(p_targetImageID.getValue(), image);
        }
        else {
            LWARNING("Unable to create ImageIO Instance; No suitable reader found!");
        }
    }
    void ItkReader::ReadImageSeries(DataContainer& data) {
        // Reads a series of image files through ITK and stacks them into one
        // volume; the series adds one dimension. The first file defines size and
        // data type for the whole stack. DOUBLE data is converted to float.
        typedef itk::ImageIOBase::IOComponentType ScalarPixelType;

        std::vector<std::string> imageFileNames = GetImageFileNames();

        if (!imageFileNames.size())
            return;

        itk::ImageIOBase::Pointer imageIO =
            itk::ImageIOFactory::CreateImageIO(imageFileNames[0].c_str(), itk::ImageIOFactory::ReadMode);

        const int numSlices = imageFileNames.size();

        if (imageIO.IsNotNull())
        {
            WeaklyTypedPointer wtp;

            imageIO->SetFileName(imageFileNames[0]);
            imageIO->ReadImageInformation();

            const ScalarPixelType pixelType = imageIO->GetComponentType();
            const size_t numDimensions = imageIO->GetNumberOfDimensions();

            LDEBUG("Reading Image with Reader " << imageIO->GetNameOfClass());
            LDEBUG("Pixel Type is " << imageIO->GetComponentTypeAsString(pixelType));
            LDEBUG("numDimensions: " << numDimensions);

            if (numDimensions > 3) {
                LERROR("Error: Dimensions higher than 3 not supported!");
                return;
            }

            itk::ImageIORegion ioRegion(numDimensions);
            itk::ImageIORegion::IndexType ioStart = ioRegion.GetIndex();
            itk::ImageIORegion::SizeType ioSize = ioRegion.GetSize();

            cgt::vec3 imageOffset(0.f);
            cgt::vec3 voxelSize(1.f);
            cgt::ivec3 size_i(1);

            // we assured above that numDimensions is <= 3
            for (int i = 0; i < static_cast<int>(numDimensions); i++) {
                size_i[i] = imageIO->GetDimensions(i);
                imageOffset[i] = imageIO->GetOrigin(i);
                voxelSize[i] = imageIO->GetSpacing(i);
                ioStart[i] = 0;
                ioSize[i] = size_i[i];
            }

            cgt::svec3 size(size_i);
            // each slice must be at most 2D, since the series supplies the third dimension
            size_t dimensionality = (size_i[2] == 1) ? ((size_i[1] == 1) ? 1 : 2) : 3;
            if (dimensionality > 2) {
                LERROR("Error: Cannot load image series with more than two dimensions!");
                return;
            }

            LDEBUG("Image Size is " << size);
            LDEBUG("Voxel Size is " << voxelSize);
            LDEBUG("Image Offset is " << imageOffset);
            LDEBUG("component size: " << imageIO->GetComponentSize());
            LDEBUG("components: " << imageIO->GetNumberOfComponents());
            LDEBUG("pixel type (string): " << imageIO->GetPixelTypeAsString(imageIO->GetPixelType()));
            LDEBUG("pixel type: " << imageIO->GetPixelType());

            // map the ITK component type to the internal weakly typed base type
            switch (pixelType) {
            case itk::ImageIOBase::CHAR:
                wtp._baseType = WeaklyTypedPointer::INT8; break;
            case itk::ImageIOBase::UCHAR:
                wtp._baseType = WeaklyTypedPointer::UINT8; break;
            case itk::ImageIOBase::SHORT:
                wtp._baseType = WeaklyTypedPointer::INT16; break;
            case itk::ImageIOBase::USHORT:
                wtp._baseType = WeaklyTypedPointer::UINT16; break;
            case itk::ImageIOBase::INT:
                wtp._baseType = WeaklyTypedPointer::INT32; break;
            case itk::ImageIOBase::UINT:
                wtp._baseType = WeaklyTypedPointer::UINT32; break;
            case itk::ImageIOBase::DOUBLE:
                LWARNING("Pixel Type is DOUBLE. Conversion to float may result in loss of precision!");
                // intentional fall through: DOUBLE data is read and converted to FLOAT below
            case itk::ImageIOBase::FLOAT:
                wtp._baseType = WeaklyTypedPointer::FLOAT; break;
            default:
                LERROR("Error while loading ITK image: unsupported type: " << pixelType);
                return;
            }

            wtp._numChannels = imageIO->GetNumberOfComponents();

            //Setup the image region to read
            ioRegion.SetIndex(ioStart);
            ioRegion.SetSize(ioSize);
            imageIO->SetIORegion(ioRegion);

            //allocate a temporary buffer if necessary (DOUBLE slices are converted one by one)
            double* inputBuf = (pixelType == itk::ImageIOBase::DOUBLE) ? new double[imageIO->GetImageSizeInComponents()] : nullptr;
            size_t sliceSize = (pixelType == itk::ImageIOBase::DOUBLE) ? imageIO->GetImageSizeInComponents() * sizeof(float) : imageIO->GetImageSizeInBytes();
            wtp._pointer = new uint8_t[numSlices * sliceSize];
            for (int idx = 0; idx < numSlices; ++idx) {
                itk::ImageIOBase::Pointer fileIO = imageIO;
                    //itk::ImageIOFactory::CreateImageIO(imageFileNames[idx].c_str(), itk::ImageIOFactory::ReadMode);
                fileIO->SetFileName(imageFileNames[idx]);
                fileIO->ReadImageInformation();
                fileIO->SetIORegion(ioRegion);

                // every slice must match the first one in size and data type
                size_t currentSliceSize = (pixelType == itk::ImageIOBase::DOUBLE) ? imageIO->GetImageSizeInComponents() * sizeof(float) : fileIO->GetImageSizeInBytes();
                if (currentSliceSize != sliceSize) {
                    LERROR("Image " << imageFileNames[idx] << " has different dimensionality or data type!");
                    // FIX: both buffers were allocated with new[] and must be
                    // released with delete[] (plain delete was undefined behavior)
                    delete[] static_cast<uint8_t*>(wtp._pointer);
                    delete[] inputBuf;
                    wtp._pointer = nullptr;
                    return;
                }

                uint8_t* sliceBuffer = static_cast<uint8_t*>(wtp._pointer) + idx * sliceSize;

                if (pixelType != itk::ImageIOBase::DOUBLE) {
                    // directly read slice into buffer
                    fileIO->Read(sliceBuffer);
                }
                else {
                    // convert double slice to float via the temporary buffer
                    fileIO->Read(inputBuf);

                    double* dptr = inputBuf;
                    float* fptr = reinterpret_cast<float*>(sliceBuffer);
                    for (int i = 0, s = fileIO->GetImageSizeInComponents(); i < s; ++i) {
                        *fptr = static_cast<float>(*dptr);
                        fptr++;
                        dptr++;
                    }
                }
            }
            delete[] inputBuf;

            size[2] = numSlices;
            //series adds one dimension
            ImageData* image = new ImageData(dimensionality+1, size, wtp._numChannels);
            ImageRepresentationLocal::create(image, wtp);

            image->setMappingInformation(ImageMappingInformation(size, imageOffset/* + p_imageOffset.getValue()*/, voxelSize /** p_voxelSize.getValue()*/));
            data.addData(p_targetImageID.getValue(), image);
        }
        else {
            LWARNING("Unable to create ImageIO Instance; No suitable reader found!");
        }
    }
    void GlGaussianFilter::updateResult(DataContainer& data) {
        // Applies a separable Gaussian blur on the GPU: up to three convolution
        // passes (X, Y and -- for 3D images -- Z), ping-ponging between two
        // result textures.
        ImageRepresentationGL::ScopedRepresentation img(data, p_inputImage.getValue());

        if (img != 0) {
            if (img->getParent()->getDimensionality() > 1) {
                cgt::ivec3 size = img->getSize();
                // kernel radius of ~2.5 sigma; must fit into the kernel uniform buffer
                int halfKernelSize = static_cast<int>(2.5 * p_sigma.getValue());
                // FIX: added the missing statement-terminating semicolon after the assertion macro
                cgtAssert(halfKernelSize < MAX_HALF_KERNEL_SIZE, "halfKernelSize too big -> kernel uniform buffer will be out of bounds!");

                cgt::TextureUnit inputUnit, kernelUnit;
                inputUnit.activate();

                // create ping-pong textures for intermediate and final results
                cgt::Texture* resultTextures[2];
                for (size_t i = 0; i < 2; ++i) {
                    resultTextures[i] = new cgt::Texture(img->getTexture()->getType(), size, img->getTexture()->getInternalFormat(), cgt::Texture::LINEAR);
                }

                // we need to distinguish 2D and 3D case
                cgt::Shader* leShader = (size.z == 1) ? _shader2D : _shader3D;

                // activate shader
                leShader->activate();
                leShader->setUniform("_halfKernelSize", halfKernelSize);

                // bind the precomputed kernel weights as a buffer texture
                kernelUnit.activate();
                glBindTexture(GL_TEXTURE_BUFFER, _kernelBufferTexture);
                glTexBuffer(GL_TEXTURE_BUFFER, GL_R32F, _kernelBuffer->getId());
                leShader->setUniform("_kernel", kernelUnit.getUnitNumber());
                LGL_ERROR;

                // activate FBO and attach texture
                _fbo->activate();
                glViewport(0, 0, static_cast<GLsizei>(size.x), static_cast<GLsizei>(size.y));

                // start 3 passes of convolution: in X, Y and Z direction:
                {
                    // X pass: input image -> resultTextures[0]
                    leShader->setUniform("_direction", cgt::ivec3(1, 0, 0));
                    img->bind(leShader, inputUnit);

                    // render one quad per z-slice; _zTexCoord samples the slice center
                    for (int z = 0; z < size.z; ++z) {
                        float zTexCoord = static_cast<float>(z)/static_cast<float>(size.z) + .5f/static_cast<float>(size.z);
                        if (size.z > 1)
                            leShader->setUniform("_zTexCoord", zTexCoord);
                        _fbo->attachTexture(resultTextures[0], GL_COLOR_ATTACHMENT0, 0, z);
                        LGL_ERROR;
                        QuadRdr.renderQuad();
                    }
                }
                {
                    // Y pass: resultTextures[0] -> resultTextures[1]
                    leShader->setUniform("_direction", cgt::ivec3(0, 1, 0));
                    inputUnit.activate();
                    resultTextures[0]->bind();

                    for (int z = 0; z < size.z; ++z) {
                        float zTexCoord = static_cast<float>(z)/static_cast<float>(size.z) + .5f/static_cast<float>(size.z);
                        if (size.z > 1)
                            leShader->setUniform("_zTexCoord", zTexCoord);
                        _fbo->attachTexture(resultTextures[1], GL_COLOR_ATTACHMENT0, 0, z);
                        LGL_ERROR;
                        QuadRdr.renderQuad();
                    }
                }
                // we need the third pass only in the 3D case
                if (size.z > 1) {
                    // Z pass: resultTextures[1] -> resultTextures[0]
                    leShader->setUniform("_direction", cgt::ivec3(0, 0, 1));
                    inputUnit.activate();
                    resultTextures[1]->bind();

                    for (int z = 0; z < size.z; ++z) {
                        float zTexCoord = static_cast<float>(z)/static_cast<float>(size.z) + .5f/static_cast<float>(size.z);
                        leShader->setUniform("_zTexCoord", zTexCoord);
                        _fbo->attachTexture(resultTextures[0], GL_COLOR_ATTACHMENT0, 0, z);
                        LGL_ERROR;
                        QuadRdr.renderQuad();
                    }
                }
                else {
                    // in the 2D case we just swap the result textures, so that we write the correct image out in the lines below.
                    std::swap(resultTextures[0], resultTextures[1]);
                }

                _fbo->detachAll();
                _fbo->deactivate();
                leShader->deactivate();

                // put resulting image into DataContainer; resultTextures[0] is handed
                // to the GL representation (presumably it takes ownership -- only [1]
                // is deleted here)
                ImageData* id = new ImageData(img->getParent()->getDimensionality(), size, img->getParent()->getNumChannels());
                ImageRepresentationGL::create(id, resultTextures[0]);
                id->setMappingInformation(img->getParent()->getMappingInformation());
                data.addData(p_outputImage.getValue(), id);

                // the other ping-pong texture is no longer needed
                delete resultTextures[1];

                cgt::TextureUnit::setZeroUnit();
                LGL_ERROR;
            }
            else {
                LERROR("Supports only 2D and 3D Gaussian Blur.");
            }
        }
        else {
            LDEBUG("No suitable input image found.");
        }
    }
    /**
     * Renders a single axis-aligned slice of a 3D image as view-aligned geometry.
     * Reads the source image and camera from \a data, builds a slice proxy geometry,
     * rasterizes it through the slice shader and stores the result as RenderData
     * under p_targetImageID.
     */
    void SliceRenderer3D::updateResult(DataContainer& data) {
        ImageRepresentationGL::ScopedRepresentation img(data, p_sourceImageID.getValue());
        ScopedTypedData<CameraData> camera(data, p_camera.getValue());

        if (img != nullptr && camera != nullptr) {
            if (img->getDimensionality() == 3) {
                const cgt::Camera& cam = camera->getCamera();

                // Creating the slice proxy geometry works as follows:
                // Create the cube proxy geometry for the volume, then clip the cube against the slice plane.
                // The closing face is the slice proxy geometry.
                // This is probably not the fastest, but an elegant solution, which also supports arbitrary slice orientations. :)
                cgt::Bounds volumeExtent = img->getParent()->getWorldBounds();
                std::unique_ptr<MeshGeometry> cube = GeometryDataFactory::createCube(volumeExtent, cgt::Bounds(cgt::vec3(0.f), cgt::vec3(1.f)));

                // Determine the slice plane (normal + world-space distance along the normal)
                // from the selected orientation and slice number.
                cgt::vec3 normal(0.f, 0.f, 0.f);
                float p = 0.0f;

                switch (p_sliceOrientation.getOptionValue()) {
                case XY_PLANE:
                    normal = cgt::vec3(0.f, 0.f, 1.f);
                    p = img->getParent()->getMappingInformation().getOffset().z + (p_sliceNumber.getValue() * img->getParent()->getMappingInformation().getVoxelSize().z);
                    break;
                case XZ_PLANE:
                    normal = cgt::vec3(0.f, 1.f, 0.f);
                    p = img->getParent()->getMappingInformation().getOffset().y + (p_sliceNumber.getValue() * img->getParent()->getMappingInformation().getVoxelSize().y);
                    break;
                case YZ_PLANE:
                    normal = cgt::vec3(1.f, 0.f, 0.f);
                    p = img->getParent()->getMappingInformation().getOffset().x + (p_sliceNumber.getValue() * img->getParent()->getMappingInformation().getVoxelSize().x);
                    break;
                }
                MeshGeometry clipped = cube->clipAgainstPlane(p, normal, true);
                const FaceGeometry& slice = clipped.getFaces().back(); // the last face is the closing face

                glEnable(GL_DEPTH_TEST);
                _shader->activate();

                _shader->setIgnoreUniformLocationError(true);
                _shader->setUniform("_viewportSizeRCP", 1.f / cgt::vec2(getEffectiveViewportSize()));
                _shader->setUniform("_projectionMatrix", cam.getProjectionMatrix());
                _shader->setUniform("_viewMatrix", cam.getViewMatrix());

                cgt::TextureUnit inputUnit, tfUnit;
                img->bind(_shader, inputUnit);
                p_transferFunction.getTF()->bind(_shader, tfUnit);

                // RAII guard activates the processor's FBO and restores state on scope exit.
                FramebufferActivationGuard fbGuard(this);
                createAndAttachColorTexture();
                createAndAttachDepthTexture();
                glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
                slice.render(GL_TRIANGLE_FAN);

                _shader->deactivate();
                cgt::TextureUnit::setZeroUnit();
                glDisable(GL_DEPTH_TEST);

                data.addData(p_targetImageID.getValue(), new RenderData(_fbo));
            }
            else {
                LERROR("Input image must have dimensionality of 3.");
            }
        }
        else {
            LDEBUG("No suitable input image found.");
        }
    }
    /**
     * Renders a (possibly cropped) 2D image as a screen-filling quad with correct
     * aspect ratio. Reads the source image from \a data and stores the rendered
     * slice as RenderData under p_targetImageID.
     */
    void SliceRenderer2D::updateResult(DataContainer& data) {
        ImageRepresentationGL::ScopedRepresentation img(data, p_sourceImageID.getValue());

        if (img != nullptr) {
            if (img->getDimensionality() == 2) {
                cgt::vec3 imgSize(img->getSize());

                float renderTargetRatio = static_cast<float>(getEffectiveViewportSize().x) / static_cast<float>(getEffectiveViewportSize().y);

                // Crop rectangle in pixel coordinates, taken from the four crop properties.
                cgt::vec2 topLeft_px(static_cast<float>(p_cropLeft.getValue()), static_cast<float>(p_cropTop.getValue()));
                cgt::vec2 bottomRight_px(static_cast<float>(imgSize.x - p_cropRight.getValue()), static_cast<float>(imgSize.y - p_cropBottom.getValue()));
                cgt::vec2 croppedSize = bottomRight_px - topLeft_px;

                // Physical aspect ratio of the cropped slice (voxel size corrects anisotropy).
                float sliceRatio =
                    (static_cast<float>(croppedSize.x) * img.getImageData()->getMappingInformation().getVoxelSize().x)
                    / (static_cast<float>(croppedSize.y) * img.getImageData()->getMappingInformation().getVoxelSize().y);

                // configure model matrix so that slices are rendered with correct aspect ratio
                float ratioRatio = sliceRatio / renderTargetRatio;
                cgt::mat4 viewMatrix = (ratioRatio > 1) ? cgt::mat4::createScale(cgt::vec3(1.f, 1.f / ratioRatio, 1.f)) : cgt::mat4::createScale(cgt::vec3(ratioRatio, 1.f, 1.f));
                viewMatrix.t11 *= -1;   // flip y so image coordinates match screen coordinates

                // prepare OpenGL
                _shader->activate();
                cgt::TextureUnit inputUnit, tfUnit;
                img->bind(_shader, inputUnit);
                p_transferFunction.getTF()->bind(_shader, tfUnit);

                if (p_invertXAxis.getValue())
                    viewMatrix *= cgt::mat4::createScale(cgt::vec3(-1, 1, 1));

                if (p_invertYAxis.getValue())
                    viewMatrix *= cgt::mat4::createScale(cgt::vec3(1, -1, 1));

                // Crop rectangle normalized to [0, 1] texture coordinates.
                cgt::vec2 topLeft = topLeft_px / imgSize.xy();
                cgt::vec2 bottomRight = bottomRight_px / imgSize.xy();

                _shader->setUniform("_viewMatrix", viewMatrix);
                _shader->setUniform("_topLeft", topLeft);
                _shader->setUniform("_bottomRight", bottomRight);

                // render slice
                FramebufferActivationGuard fbGuard(this);
                createAndAttachColorTexture();
                createAndAttachDepthTexture();
                glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
                QuadRdr.renderQuad();

                _shader->deactivate();
                cgt::TextureUnit::setZeroUnit();

                data.addData(p_targetImageID.getValue(), new RenderData(_fbo));
            }
            else {
                LERROR("Input image must have dimensionality of 2.");
            }
        }
        else {
            LDEBUG("No suitable input image found.");
        }
    }
    /**
     * Loads one image (or a numbered series of images stacked into a volume) via DevIL
     * and publishes the result to \a data under p_targetImageID.
     *
     * A trailing digit run in the base file name (e.g. "slice0042.png") is treated as a
     * series index; if p_importSimilar is set, all consecutively numbered files are read
     * as slices of a single 3D image. The "rt" import type additionally wraps the image
     * in a RenderData together with a zero-filled fake depth texture.
     */
    void DevilImageReader::updateResult(DataContainer& data) {
        const std::string& url = p_url.getValue();
        std::string directory = cgt::FileSystem::dirName(url);
        std::string base = cgt::FileSystem::baseName(url);
        std::string ext = cgt::FileSystem::fileExtension(url);

        // check whether we open an image series (trailing run of digits in the base name)
        size_t suffixPos = base.find_last_not_of("0123456789");
        if (suffixPos != std::string::npos)
            ++suffixPos;
        size_t suffixLength = (suffixPos == std::string::npos) ? 0 : base.length() - suffixPos;

        // assemble the list of files to read
        std::vector<std::string> files;
        if (suffixLength == 0 || !p_importSimilar.getValue()) {
            files.push_back(url);
        }
        else {
            std::string prefix = base.substr(0, suffixPos);
            int index = StringUtils::fromString<int>(base.substr(suffixPos));

            // collect consecutively numbered files (same zero-padding width) until one is missing
            while (cgt::FileSystem::fileExists(directory + "/" + prefix + StringUtils::toString(index, suffixLength, '0') + "." + ext)) {
                files.push_back(directory + "/" + prefix + StringUtils::toString(index, suffixLength, '0') + "." + ext);
                ++index;
            }
        }

        if (files.empty())
            return;

        // each file becomes one z-slice; x/y are determined from the first image
        cgt::ivec3 imageSize(0, 0, static_cast<int>(files.size()));
        uint8_t* buffer = nullptr;

        // a fixed import type forces DevIL to convert into that format; 0 = take the file's format
        ILint devilFormat = 0;
        if (p_importType.getOptionValue() == "localIntensity")
            devilFormat = IL_LUMINANCE;
        else if (p_importType.getOptionValue() == "localIntensity3")
            devilFormat = IL_RGB;
        else if (p_importType.getOptionValue() == "rt")
            devilFormat = IL_RGBA;

        ILint devilDataType = 0;
        WeaklyTypedPointer::BaseType campvisDataType = WeaklyTypedPointer::UINT8;
        size_t numChannels = 1;

        // start reading
        for (size_t i = 0; i < files.size(); ++i) {
            // prepare DevIL
            ILuint img;
            ilGenImages(1, &img);
            ilBindImage(img);

            // try load file
            if (! ilLoadImage(files[i].c_str())) {
                LERROR("Could not load image: " << files[i]);
                ilDeleteImage(img);     // release the DevIL image on every error path
                delete [] buffer;
                return;
            }

            // prepare buffer and perform dimensions check
            if (i == 0) {
                imageSize.x = ilGetInteger(IL_IMAGE_WIDTH);
                imageSize.y = ilGetInteger(IL_IMAGE_HEIGHT);

                if (devilFormat == 0)
                    devilFormat = ilGetInteger(IL_IMAGE_FORMAT);

                // map the DevIL element type onto the matching campvis base type
                switch (ilGetInteger(IL_IMAGE_TYPE)) {
                    case IL_UNSIGNED_BYTE:
                        devilDataType = IL_UNSIGNED_BYTE;
                        campvisDataType = WeaklyTypedPointer::UINT8;
                        break;
                    case IL_BYTE:
                        devilDataType = IL_BYTE;
                        campvisDataType = WeaklyTypedPointer::INT8;
                        break;
                    case IL_UNSIGNED_SHORT:
                        devilDataType = IL_UNSIGNED_SHORT;
                        campvisDataType = WeaklyTypedPointer::UINT16;
                        break;
                    case IL_SHORT:
                        devilDataType = IL_SHORT;
                        campvisDataType = WeaklyTypedPointer::INT16;
                        break;
                    case IL_UNSIGNED_INT:
                        devilDataType = IL_UNSIGNED_INT;
                        campvisDataType = WeaklyTypedPointer::UINT32;
                        break;
                    case IL_INT:
                        devilDataType = IL_INT;
                        campvisDataType = WeaklyTypedPointer::INT32;
                        break;
                    case IL_FLOAT:
                        devilDataType = IL_FLOAT;
                        campvisDataType = WeaklyTypedPointer::FLOAT;
                        break;
                    default:
                        LERROR("unsupported data type: " << ilGetInteger(IL_IMAGE_TYPE) << " (" << files[i] << ")");
                        ilDeleteImage(img);
                        delete [] buffer;
                        return;
                }

                switch (devilFormat) {
                    case IL_LUMINANCE:
                        numChannels = 1;
                        break;
                    case IL_LUMINANCE_ALPHA:
                        numChannels = 2;
                        break;
                    case IL_RGB:
                        numChannels = 3;
                        break;
                    case IL_RGBA:
                        numChannels = 4;
                        break;
                    default:
                        LERROR("unsupported image format: " << devilFormat << " (" << files[i] << ")");
                        ilDeleteImage(img);
                        delete [] buffer;
                        return;
                }
                // one contiguous buffer for the whole stack of slices
                buffer = new uint8_t[cgt::hmul(imageSize) * WeaklyTypedPointer::numBytes(campvisDataType, numChannels)];
            }
            else {
                // all slices of a series must agree in size with the first one
                if (imageSize.x != ilGetInteger(IL_IMAGE_WIDTH)) {
                    LERROR("Could not load images: widths do not match!");
                    ilDeleteImage(img);
                    delete [] buffer;
                    return;
                }
                if (imageSize.y != ilGetInteger(IL_IMAGE_HEIGHT)) {
                    LERROR("Could not load images: heights do not match!");
                    ilDeleteImage(img);
                    delete [] buffer;
                    return;
                }
            }

            // get data from image and transform to single intensity image:
            ilCopyPixels(0, 0, 0, imageSize.x, imageSize.y, 1, devilFormat, devilDataType, buffer + (WeaklyTypedPointer::numBytes(campvisDataType, numChannels) * i * imageSize.x * imageSize.y));
            ILint err = ilGetError();
            if (err != IL_NO_ERROR) {
                LERROR("Error during conversion: " << iluErrorString(err));
                ilDeleteImage(img);
                delete [] buffer;
                return;
            }

            ilDeleteImage(img);
        }

        // collapse degenerate axes into a lower dimensionality
        size_t dimensionality = 3;
        if (imageSize.z == 1)
            dimensionality = 2;
        if (imageSize.y == 1)
            dimensionality = 1;

        ImageData* id = new ImageData(dimensionality, imageSize, numChannels);
        WeaklyTypedPointer wtp(campvisDataType, numChannels, buffer);
        ImageRepresentationLocal::create(id, wtp);
        //id->setMappingInformation(ImageMappingInformation(imageSize, p_imageOffset.getValue(), p_voxelSize.getValue()));

        if (p_importType.getOptionValue() == "rt") {
            RenderData* rd = new RenderData();
            rd->addColorTexture(id);

            // create fake depth image (all zeros)
            ImageData* idDepth = new ImageData(dimensionality, imageSize, 1);
            float* ptr = new float[cgt::hmul(imageSize)];
            memset(ptr, 0, cgt::hmul(imageSize) * sizeof(float));
            // ptr holds floats, so the pointer must be typed FLOAT
            // (previously used campvisDataType, which may be an integer type and
            // would misdescribe the buffer's element size and layout).
            WeaklyTypedPointer wtpDepth(WeaklyTypedPointer::FLOAT, 1, ptr);
            ImageRepresentationLocal::create(idDepth, wtpDepth);
            rd->setDepthTexture(idDepth);

            data.addData(p_targetImageID.getValue(), rd);
        }
        else {
            data.addData(p_targetImageID.getValue(), id);
        }
    }