void main()
{
  //Calculate the ray direction using viewport information.
  //The fragment is mapped to [-1,1]^2 on a virtual image plane sitting at
  //z = -FocalLength in eye space.
  vec3 rayDirection;
  rayDirection.x = 2.0 * gl_FragCoord.x / WindowSize.x - 1.0;
  rayDirection.y = 2.0 * gl_FragCoord.y / WindowSize.y - 1.0;
  //Aspect-ratio correction of the vertical component
  rayDirection.y *= WindowSize.y / WindowSize.x;
  rayDirection.z = -FocalLength;
  //vec4 * matrix multiplies by the transpose; for the rotational part of the
  //view matrix this is its inverse, taking the direction into model space
  rayDirection = (vec4(rayDirection, 0.0) * ViewMatrix).xyz;
  rayDirection = normalize(rayDirection);

  //Cube ray intersection test: slab method against the [-1,1]^3 volume cube.
  //tbot/ttop hold the per-axis ray parameters of the two slab planes.
  vec3 invR = 1.0 / rayDirection;
  vec3 boxMin = vec3(-1.0,-1.0,-1.0);
  vec3 boxMax = vec3( 1.0, 1.0, 1.0);
  vec3 tbot = invR * (boxMin - RayOrigin);
  vec3 ttop = invR * (boxMax - RayOrigin);
  //Now sort all elements of tbot and ttop to find the two min and max elements
  vec3 tmin = min(ttop, tbot); //Closest planes
  vec2 t = max(tmin.xx, tmin.yz);
  //Out of the closest planes, find the last to be entered (collision point)
  float tnear = max(t.x, t.y);
  //If the viewpoint is penetrating the volume, make sure to only cast the ray
  //from the eye position, not behind it
  if (tnear < 0.0) tnear = 0.0;
  //Now work out when the ray will leave the volume
  vec3 tmax = max(ttop, tbot); //Distant planes
  t = min(tmax.xx, tmax.yz); //Find the first plane to be exited
  float tfar = min(t.x, t.y);

  //Check what the screen depth is to make sure we don't sample the
  //volume past any standard GL objects already drawn into the depth buffer
  float bufferDepth = texture2D(DepthTexture, gl_FragCoord.xy / WindowSize.xy).r;
  float depth = recalcZCoord(bufferDepth);
  if (tfar > depth) tfar = depth;

  //This value is used to ensure that changing the step size does not
  //change the visualization as the alphas are renormalized using it.
  //For more information see the loop below where it is used
  const float baseStepSize = 0.01;

  //We need to calculate the ray's starting position. We add a random
  //fraction of the stepsize to the original starting point to dither
  //the output and hide the banding of a fixed sampling interval.
  //The sin/fract expression is a common screen-space pseudo-random hash.
  float random = DitherRay * fract(sin(gl_FragCoord.x * 12.9898 + gl_FragCoord.y * 78.233) * 43758.5453);
  vec3 rayPos = RayOrigin + rayDirection * (tnear + StepSize * random);

  //The color accumulation variable
  vec4 color = vec4(0.0, 0.0, 0.0, 0.0);
  //We store the last valid normal, incase we hit a homogeneous region
  //and need to reuse it, but at the start we have no normal
  vec3 lastnorm = vec3(0,0,0);

  //NOTE(review): "length" and "sample" shadow built-in GLSL names. This is
  //legal in GLSL 1.20 but they are reserved in later versions — confirm the
  //targeted GLSL version before upgrading the shader header.
  for (float length = tfar - tnear; length > 0.0;
       length -= StepSize, rayPos.xyz += rayDirection * StepSize)
    {
      //Grab the volume sample; the texture spans [0,1]^3 while the ray
      //position spans [-1,1]^3, hence the remapping
      vec4 sample = texture3D(DataTexture, (rayPos + 1.0) * 0.5);
      //Sort out the normal data (stored biased into [0,1] in the rgb channels)
      vec3 norm = sample.xyz * 2.0 - 1.0;
      //Test if we've got a bad (near-zero) normal and need to reuse the old one
      if (dot(norm,norm) < 0.5) norm = lastnorm;
      //Store the current normal
      lastnorm = norm;

      //Calculate the color of the voxel using the transfer function
      vec4 src = texture1D(TransferTexture, sample.a);
      //This corrects the transparency change caused by changing step
      //size. All alphas are defined for a certain base step size
      src.a = 1.0 - pow((1.0 - src.a), StepSize / baseStepSize);

      ////////////Lighting calculations
      //We perform all the calculations in the model (untransformed)
      //space.
      vec3 lightDir = normalize(LightPosition - rayPos);
      float lightNormDot = dot(normalize(norm), lightDir);
      //Diffuse lighting, half-Lambert style: remapped into [0.5,1] so
      //voxels facing away from the light are never fully dark
      float diffTerm = max(0.5 * lightNormDot + 0.5, 0.5);
      //Quadratic falloff of the diffusive term
      diffTerm *= diffTerm;
      //We either use diffusive lighting plus an ambient, or if its
      //disabled (DiffusiveLighting = 0), we just use the original
      //color.
      vec3 ambient = vec3(0.1,0.1,0.1);
      src.rgb *= DiffusiveLighting * (diffTerm + ambient) + (1.0 - DiffusiveLighting);

      //Specular lighting term
      //This is enabled if (SpecularLighting == 1)
      //Note: reflect() expects the incident vector pointing AT the surface;
      //passing lightDir (which points away from it) flips the sign of the
      //result, which is cancelled by dotting with rayDirection (eye->sample)
      //instead of the view vector (sample->eye) — the two flips cancel out.
      vec3 ReflectedRay = reflect(lightDir, norm);
      //NOTE(review): the bool-by-float products here and in the final divide
      //below rely on implicit conversions that strict GLSL compilers reject;
      //float(lightNormDot > 0.0) would be the portable form — verify against
      //the targeted drivers.
      src.rgb += SpecularLighting * (lightNormDot > 0) //Test to ensure that specular is only
                                                       //applied to front facing voxels
        * vec3(1.0,1.0,1.0)
        * pow(max(dot(ReflectedRay, rayDirection), 0.0), 96.0);

      ///////////Front to back blending
      src.rgb *= src.a;
      color = (1.0 - color.a) * src + color;

      //We only accumulate up to 0.95 alpha (the front to back
      //blending never reaches 1); early ray termination.
      if (color.a >= 0.95)
        {
          //We have to renormalize the color by the alpha value (see
          //below)
          color.rgb /= color.a;
          //Set the alpha to one to make sure the pixel is not transparent
          color.a = 1.0;
          break;
        }
    }

  /*We must renormalize the color by the alpha value. For example, if
    our ray only hits just one white voxel with a alpha of 0.5, we will
    have src.rgb = vec4(1,1,1,0.5)
    src.rgb *= src.a;
    //which gives, src.rgb = 0.5 * src.rgb = vec4(0.5,0.5,0.5,0.5)
    color = (1.0 - color.a) * src + color;
    //which gives, color = (1.0 - 0) * vec4(0.5,0.5,0.5,0.5) + vec4(0,0,0,0)
    //            = vec4(0.5,0.5,0.5,0.5)
    So the final color of the ray is half way between white and black,
    but the voxel it hit was white! The solution is to divide by the
    alpha, as this is the "amount of color" added to color. */
  //The (color.a == 0.0) term makes the divisor 1 when nothing was
  //accumulated, avoiding a division by zero for rays that miss everything.
  color.rgb /= (color.a == 0.0) + color.a;
  gl_FragColor = color;
});
void PointBasedLightingShader::compileShader(void) { const GLLightTracker& lt=*(contextData.getLightTracker()); const GLClipPlaneTracker& cpt=*(contextData.getClipPlaneTracker()); std::string vertexShaderDefines; std::string vertexShaderFunctions; std::string vertexShaderMain; if(usePlaneDistance) { /* Create the plane distance mapping uniforms: */ vertexShaderMain+="\ uniform vec4 planeDistancePlane;\n\ uniform sampler1D planeDistanceMap;\n\ \n"; } /* Create the main vertex shader starting boilerplate: */ vertexShaderMain+="\ void main()\n\ {\n\ /* Compute the vertex position in eye coordinates: */\n\ vec4 vertexEc=gl_ModelViewMatrix*gl_Vertex;\n\ \n\ /* Compute the normal vector in eye coordinates: */\n\ vec3 normalEc=normalize(gl_NormalMatrix*gl_Normal);\n\ \n\ /* Let the normal vector always point towards the eye: */\n\ normalEc=faceforward(normalEc,normalEc,vertexEc.xyz);\n\ \n"; /* Get the material components: */ if(usePlaneDistance) { #ifdef LIDARVIEWER_VISUALIZE_WATER vertexShaderMain+="\ /* Calculate the distance from the water surface: */\n\ float planeDist=dot(planeDistancePlane,gl_Vertex);\n\ vec4 ambient,diffuse;\n\ if(planeDist<=0.5)\n\ {\n\ /* Get the material properties from the plane distance texture: */\n\ ambient=texture1D(planeDistanceMap,planeDist);\n\ diffuse=ambient;\n\ }\n\ else\n\ {\n"; if(usePointColors) { vertexShaderMain+="\ /* Get the material properties from the current color: */\n\ ambient=gl_Color;\n\ diffuse=gl_Color;\n"; } else { vertexShaderMain+="\ /* Get the material properties from the material state: */\n\ ambient=gl_FrontMaterial.ambient;\n\ diffuse=gl_FrontMaterial.diffuse;\n"; } vertexShaderMain+="\ }\n"; #else vertexShaderMain+="\ /* Get the material properties from the plane distance texture: */\n\ float planeDist=dot(planeDistancePlane,gl_Vertex);\n\ vec4 ambient=texture1D(planeDistanceMap,planeDist);\n\ vec4 diffuse=ambient;\n"; #endif } else if(usePointColors) { vertexShaderMain+="\ /* Get the material properties from 
the current color: */\n\ vec4 ambient=gl_Color;\n\ vec4 diffuse=gl_Color;\n"; } else { vertexShaderMain+="\ /* Get the material properties from the material state: */\n\ vec4 ambient=gl_FrontMaterial.ambient;\n\ vec4 diffuse=gl_FrontMaterial.diffuse;\n"; } vertexShaderMain+="\ vec4 specular=gl_FrontMaterial.specular;\n\ float shininess=gl_FrontMaterial.shininess;\n\ \n"; /* Continue the main vertex shader: */ vertexShaderMain+="\ /* Calculate global ambient light term: */\n\ vec4 ambientDiffuseAccum=gl_LightModel.ambient*ambient;\n\ vec4 specularAccum=vec4(0.0,0.0,0.0,0.0);\n\ \n\ /* Accumulate all enabled light sources: */\n"; /* Create light application functions for all enabled light sources: */ for(int lightIndex=0;lightIndex<lt.getMaxNumLights();++lightIndex) if(lt.getLightState(lightIndex).isEnabled()) { /* Create the light accumulation function: */ vertexShaderFunctions+=lt.createAccumulateLightFunction(lightIndex); /* Call the light application function from the shader's main function: */ vertexShaderMain+="\ accumulateLight"; char liBuffer[12]; vertexShaderMain.append(Misc::print(lightIndex,liBuffer+11)); vertexShaderMain+="(vertexEc,normalEc,ambient,diffuse,specular,shininess,ambientDiffuseAccum,specularAccum);\n"; }