/* e(): fractal (fBm-style) noise sum -- accumulates `i` octaves of the 3D
   texture `t` along `s`.  The input coordinate is pre-scaled by 2; each
   octave the amplitude decays by 0.7 and the sampling frequency grows by
   1.93 (non-integer to avoid aligned octave artifacts).  Returns the
   summed rgb contribution.
   NOTE(review): the trailing backslashes indicate this is a line-continued
   C preprocessor macro / stringified shader -- confirm the enclosing
   definition before reformatting. */
vec3 e(vec3 s, int i)\ {\ float n=1.;\ s*=2.;\ vec3 r=vec3(0.);\ for(;i>0;i--)\ {\ r+=texture3D(t,s).xyz*n;\ n*=0.7;\ s*=1.93;\ }\ return r;\ }\
// One-time applet setup: announces the applet, wires camera input, and
// builds every texturing demo scene up front.
virtual void initEvent()
{
    // Print the applet banner to the log.
    vl::Log::print(appletInfo());

    // Attach the trackball manipulator to the rendering's root transform
    // so mouse interaction drives the scene.
    trackball()->setTransform(rendering()->as<vl::Rendering>()->transform());

    // Default texture-sampling state.
    mMipmappingOn = true;
    mLodBias = 0.0;

    // Construct each demonstration scene.
    multitexturing();
    textureRectangle();
    texture3D();
    texture2DArray();
    texture1DArray();
    sphericalMapping();
    cubeMapping();
}
//Volume ray-caster fragment shader: casts a per-pixel ray through a
//[-1,1]^3 volume cube, compositing transfer-function-coloured samples
//front-to-back with diffuse/specular lighting and a dithered ray start.
//NOTE(review): the trailing "});" shows this body is the argument of an
//enclosing C/C++ macro or function call that starts before this chunk --
//confirm against the host file before restructuring.
void main()
{
  //Calculate the ray direction using viewport information
  vec3 rayDirection;
  rayDirection.x = 2.0 * gl_FragCoord.x / WindowSize.x - 1.0;
  rayDirection.y = 2.0 * gl_FragCoord.y / WindowSize.y - 1.0;
  //Aspect-ratio correction of the vertical component.
  rayDirection.y *= WindowSize.y / WindowSize.x;
  rayDirection.z = -FocalLength;
  //vector * matrix multiplies by the transpose, rotating the eye-space
  //direction back into model space (valid for a pure-rotation ViewMatrix).
  rayDirection = (vec4(rayDirection, 0.0) * ViewMatrix).xyz;
  rayDirection = normalize(rayDirection);

  //Cube ray intersection test (slab method)
  vec3 invR = 1.0 / rayDirection;
  vec3 boxMin = vec3(-1.0,-1.0,-1.0);
  vec3 boxMax = vec3( 1.0, 1.0, 1.0);
  vec3 tbot = invR * (boxMin - RayOrigin);
  vec3 ttop = invR * (boxMax - RayOrigin);
  //Now sort all elements of tbot and ttop to find the two min and max elements
  vec3 tmin = min(ttop, tbot); //Closest planes
  vec2 t = max(tmin.xx, tmin.yz); //Out of the closest planes, find the last to be entered (collision point)
  float tnear = max(t.x, t.y);
  //If the viewpoint is penetrating the volume, make sure to only cast the
  //ray from the eye position, not behind it
  if (tnear < 0.0) tnear = 0.0;
  //Now work out when the ray will leave the volume
  vec3 tmax = max(ttop, tbot); //Distant planes
  t = min(tmax.xx, tmax.yz); //Find the first plane to be exited
  float tfar = min(t.x, t.y);
  //Check what the screen depth is to make sure we don't sample the
  //volume past any standard GL objects
  float bufferDepth = texture2D(DepthTexture, gl_FragCoord.xy / WindowSize.xy).r;
  float depth = recalcZCoord(bufferDepth);
  if (tfar > depth) tfar = depth;

  //This value is used to ensure that changing the step size does not
  //change the visualization as the alphas are renormalized using it.
  //For more information see the loop below where it is used
  const float baseStepSize = 0.01;

  //We need to calculate the ray's starting position.  We add a random
  //fraction of the stepsize to the original starting point to dither
  //the output (hides stepping/banding artifacts; classic fract(sin())
  //screen-space hash).
  float random = DitherRay * fract(sin(gl_FragCoord.x * 12.9898 + gl_FragCoord.y * 78.233) * 43758.5453);
  vec3 rayPos = RayOrigin + rayDirection * (tnear + StepSize * random);

  //The color accumulation variable
  vec4 color = vec4(0.0, 0.0, 0.0, 0.0);
  //We store the last valid normal, in case we hit a homogeneous region
  //and need to reuse it, but at the start we have no normal
  vec3 lastnorm = vec3(0,0,0);

  //March from entry to exit point in StepSize increments.
  //NOTE(review): `length` and `sample` shadow GLSL built-in names -- legal
  //in GLSL 1.10/1.20 but rejected by newer versions; confirm the #version.
  for (float length = tfar - tnear; length > 0.0;
       length -= StepSize, rayPos.xyz += rayDirection * StepSize)
  {
    //Grab the volume sample: xyz encodes the precomputed gradient/normal,
    //a holds the scalar density used to index the transfer function.
    vec4 sample = texture3D(DataTexture, (rayPos + 1.0) * 0.5);

    //Sort out the normal data (unpack from [0,1] to [-1,1])
    vec3 norm = sample.xyz * 2.0 - 1.0;
    //Test if we've got a bad (near-zero, homogeneous-region) normal and
    //need to reuse the old one
    if (dot(norm,norm) < 0.5) norm = lastnorm;
    //Store the current normal
    lastnorm = norm;

    //Calculate the color of the voxel using the transfer function
    vec4 src = texture1D(TransferTexture, sample.a);
    //This corrects the transparency change caused by changing step
    //size. All alphas are defined for a certain base step size.
    src.a = 1.0 - pow((1.0 - src.a), StepSize / baseStepSize);

    ////////////Lighting calculations
    //We perform all the calculations in the model (untransformed) space.
    vec3 lightDir = normalize(LightPosition - rayPos);
    float lightNormDot = dot(normalize(norm), lightDir);

    //Diffuse lighting: half-Lambert style wrap, clamped to a 0.5 ambient floor
    float diffTerm = max(0.5 * lightNormDot + 0.5, 0.5);
    //Quadratic falloff of the diffusive term
    diffTerm *= diffTerm;

    //We either use diffusive lighting plus an ambient, or if it is
    //disabled (DiffusiveLighting = 0), we just use the original color.
    vec3 ambient = vec3(0.1,0.1,0.1);
    src.rgb *= DiffusiveLighting * (diffTerm + ambient) + (1.0 - DiffusiveLighting);

    //Specular lighting term -- this is enabled if (SpecularLighting == 1)
    vec3 ReflectedRay = reflect(lightDir, norm);
    //NOTE(review): `(lightNormDot > 0)` is a bool used in arithmetic; strict
    //GLSL requires float(lightNormDot > 0.0).  Confirm the target compiler
    //accepts the implicit conversion.  The factor gates specular to
    //front-facing voxels only.
    src.rgb += SpecularLighting * (lightNormDot > 0)
      * vec3(1.0,1.0,1.0)
      * pow(max(dot(ReflectedRay, rayDirection), 0.0), 96.0);

    ///////////Front to back blending (premultiply, then under-composite)
    src.rgb *= src.a;
    color = (1.0 - color.a) * src + color;

    //We only accumulate up to 0.95 alpha (the front to back
    //blending never reaches 1).
    if (color.a >= 0.95)
    {
      //We have to renormalize the color by the alpha value (see below)
      color.rgb /= color.a;
      //Set the alpha to one to make sure the pixel is not transparent
      color.a = 1.0;
      break;
    }
  }

  /*We must renormalize the color by the alpha value. For example, if our
    ray only hits just one white voxel with an alpha of 0.5, we will have
      src.rgb = vec4(1,1,1,0.5);
      src.rgb *= src.a;                     // src   = vec4(0.5,0.5,0.5,0.5)
      color = (1.0 - color.a) * src + color;// color = vec4(0.5,0.5,0.5,0.5)
    So the final color of the ray is half way between white and black, but
    the voxel it hit was white!  The solution is to divide by the alpha, as
    this is the "amount of color" added to color. */
  //NOTE(review): `(color.a == 0.0)` is again bool arithmetic -- it makes the
  //divisor 1 when alpha is 0 to avoid 0/0; strict GLSL needs float(...).
  color.rgb /= (color.a == 0.0) + color.a;
  gl_FragColor = color;
});
// One-time GL setup for the volume-slice viewer: initializes GL function
// pointers and debug logging, sets fixed-function blend state, loads the
// raw-intensity layer and the 64-bit segmentation overlay layer from disk
// into per-supercube GPU textures, and compiles the slice shaders.
// NOTE(review): the closing brace of this function is beyond this chunk --
// the definition continues past the last visible line.
void widget::initializeGL()
{
    std::ios_base::sync_with_stdio(false); // turns off sync between C and C++ output streams(to increase output speed)
    initializeOpenGLFunctions();

    // Route GL debug messages through qDebug for the lifetime of the widget.
    ogllogger.initialize();
    QObject::connect(&ogllogger, &QOpenGLDebugLogger::messageLogged, [](const QOpenGLDebugMessage & msg){
        qDebug() << msg;
    });
    ogllogger.startLogging();

    // Report the texture-unit limits of this context.
    GLint iUnits, texture_units, max_tu;
    glGetIntegerv(GL_MAX_TEXTURE_UNITS, &iUnits);
    glGetIntegerv(GL_MAX_TEXTURE_IMAGE_UNITS, &texture_units);
    glGetIntegerv(GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS, &max_tu);
    std::cout << "MultiTexture: " << iUnits << ' ' << texture_units << ' ' << max_tu << std::endl;

    glEnable(GL_TEXTURE_3D);
    // glEnable(GL_DEPTH_TEST);
    // glDepthFunc(GL_LEQUAL);
    glEnable(GL_BLEND);
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
    glClearColor(1.0f, 1.0f, 1.0f, 1.0f);

    // Scratch buffer reused for every cube file read below.
    std::vector<char> data;

    // ------- load raw data -------
    layers.emplace_back();
    layers.back().opacity = 1.0f;
    {
        // Zero-filled placeholder cube used where no file data exists.
        // NOTE(review): std::pow returns double; prefer
        // cpucubeedge*cpucubeedge*cpucubeedge for an exact integer size.
        data.resize(std::pow(cpucubeedge, 3));
        std::fill(std::begin(data), std::end(data), 0);
        layers.back().bogusCube.reset(new gpu_raw_cube(gpucubeedge));
        boost::multi_array_ref<std::uint8_t, 3> cube(reinterpret_cast<std::uint8_t*>(data.data()), boost::extents[cpucubeedge][cpucubeedge][cpucubeedge]);
        using range = boost::multi_array_types::index_range;
        // Upload one gpucubeedge^3 corner view of the zeroed CPU cube.
        layers.back().bogusCube->generate(cube[boost::indices[range(0,gpucubeedge)][range(cpucubeedge-gpucubeedge,cpucubeedge-0)][range(0,gpucubeedge)]]);
    }

    // How many GPU cubes fit along one edge of a CPU cube.
    const auto factor = cpucubeedge / gpucubeedge;
    // NOTE(review): hard-coded absolute Windows path -- should come from
    // configuration.
    const QString basePath("D:/New folder/cubes/2012-03-07_AreaX14_mag1_x%1_y%2_z%3.raw");
    offset = {29, 46, 23};

    // Walk the supercube in CPU-cube strides; each file is split into
    // factor^3 GPU-sized sub-cubes.
    for (int z = 0; z < supercubeedge; z += factor)
    for (int y = 0; y < supercubeedge; y += factor)
    for (int x = 0; x < supercubeedge; x += factor)
    {
        // Dataset coordinates of the CPU cube containing this GPU cube.
        const int cubex = offset.x() + x / factor;
        const int cubey = offset.y() + y / factor;
        const int cubez = offset.z() + z / factor;
        std::string path = basePath.arg(cubex, 4, 10, QLatin1Char('0')).arg(cubey, 4, 10, QLatin1Char('0')).arg(cubez, 4, 10, QLatin1Char('0')).toStdString();
        std::ifstream file(path, std::ios_base::binary);
        data.resize(std::pow(cpucubeedge, 3));
        if (file) {
            // NOTE(review): short reads are not detected -- check
            // file.gcount() against data.size().
            file.read(data.data(), data.size());
        } else {
            std::cout << path << " failed" << std::endl;
            continue; // missing cube: leave a hole, keep loading the rest
        }
        boost::multi_array_ref<std::uint8_t, 3> cube(reinterpret_cast<std::uint8_t*>(data.data()), boost::extents[cpucubeedge][cpucubeedge][cpucubeedge]);
        // Slice the CPU cube into factor^3 GPU cubes and upload each.
        for (int zi = z; zi < z + factor; ++zi)
        for (int yi = y; yi < y + factor; ++yi)
        for (int xi = x; xi < x + factor; ++xi)
        {
            const auto x_offset = gpucubeedge * (xi % factor);
            const auto y_offset = gpucubeedge * (yi % factor);
            const auto z_offset = gpucubeedge * (zi % factor);
            using range = boost::multi_array_types::index_range;
            const auto view = cube[boost::indices[range(0+z_offset,gpucubeedge+z_offset)][range(0+y_offset,gpucubeedge+y_offset)][range(0+x_offset,gpucubeedge+x_offset)]];
            // NOTE(review): QVector3D keys compare floats -- exact integer
            // coordinates work, but an integer key type would be safer.
            layers.back().textures[QVector3D(xi, yi, zi)].reset(new gpu_raw_cube(gpucubeedge));
            static_cast<gpu_raw_cube*>(layers.back().textures[QVector3D(xi, yi, zi)].get())->generate(view);
        }
    }

    // ------- load overlay data -------
    // Same scheme as above, but 64-bit segmentation IDs (8 bytes/voxel)
    // rendered through a LUT cube at half opacity.
    layers.emplace_back();
    layers.back().opacity = 0.5f;
    layers.back().isOverlayData = true;
    {
        data.resize(std::pow(cpucubeedge, 3) * 8); // 8 bytes per uint64 voxel
        std::fill(std::begin(data), std::end(data), 0);
        layers.back().bogusCube.reset(new gpu_lut_cube(gpucubeedge));
        boost::multi_array_ref<std::uint64_t, 3> cube(reinterpret_cast<std::uint64_t*>(data.data()), boost::extents[cpucubeedge][cpucubeedge][cpucubeedge]);
        using range = boost::multi_array_types::index_range;
        static_cast<gpu_lut_cube*>(layers.back().bogusCube.get())->generate(cube[boost::indices[range(0,gpucubeedge)][range(cpucubeedge-gpucubeedge,cpucubeedge-0)][range(0,gpucubeedge)]]);
    }
    for (int z = 0; z < supercubeedge; z += factor)
    for (int y = 0; y < supercubeedge; y += factor)
    for (int x = 0; x < supercubeedge; x += factor)
    {
        const int cubex = offset.x() + x / factor;
        const int cubey = offset.y() + y / factor;
        const int cubez = offset.z() + z / factor;
        std::string path = basePath.arg(cubex, 4, 10, QLatin1Char('0')).arg(cubey, 4, 10, QLatin1Char('0')).arg(cubez, 4, 10, QLatin1Char('0')).toStdString() + ".segmentation.raw";
        std::ifstream file(path, std::ios_base::binary);
        data.resize(std::pow(cpucubeedge, 3)*8);
        if (file) {
            file.read(data.data(), data.size());
        } else {
            std::cout << path << " failed" << std::endl;
            continue;
        }
        boost::multi_array_ref<std::uint64_t, 3> cube(reinterpret_cast<std::uint64_t*>(data.data()), boost::extents[cpucubeedge][cpucubeedge][cpucubeedge]);
        for (int zi = z; zi < z + factor; ++zi)
        for (int yi = y; yi < y + factor; ++yi)
        for (int xi = x; xi < x + factor; ++xi)
        {
            const auto x_offset = gpucubeedge * (xi % factor);
            const auto y_offset = gpucubeedge * (yi % factor);
            const auto z_offset = gpucubeedge * (zi % factor);
            using range = boost::multi_array_types::index_range;
            const auto view = cube[boost::indices[range(0+z_offset,gpucubeedge+z_offset)][range(0+y_offset,gpucubeedge+y_offset)][range(0+x_offset,gpucubeedge+x_offset)]];
            layers.back().textures[QVector3D(xi, yi, zi)].reset(new gpu_lut_cube(gpucubeedge));
            static_cast<gpu_lut_cube*>(layers.back().textures[QVector3D(xi, yi, zi)].get())->generate(view);
        }
    }

    // ------- shaders -------
    // Shared vertex shader: MVP transform plus texture-coordinate
    // pass-through.
    auto vertex_shader_code = R"shaderSource(
    #version 110
    uniform mat4 model_matrix;
    uniform mat4 view_matrix;
    uniform mat4 projection_matrix;
    attribute vec3 vertex;
    attribute vec3 texCoordVertex;
    varying vec3 texCoordFrag;
    void main() {
        mat4 mvp_mat = projection_matrix * view_matrix * model_matrix;
        gl_Position = mvp_mat * vec4(vertex, 1.0);
        texCoordFrag = texCoordVertex;
    })shaderSource";

    raw_data_shader.addShaderFromSourceCode(QOpenGLShader::Vertex, vertex_shader_code);
    // Fragment shader for the raw layer: grayscale from the red channel,
    // modulated by the layer opacity.
    raw_data_shader.addShaderFromSourceCode(QOpenGLShader::Fragment, R"shaderSource(
    #version 110
    uniform float textureOpacity;
    uniform sampler3D texture;
    varying vec3 texCoordFrag;//in
    //varying vec4 gl_FragColor;//out
    void main() {
        gl_FragColor = vec4(vec3(texture3D(texture, texCoordFrag).r), textureOpacity);
    })shaderSource");