Code example #1
File: encode_pivot_quads.geom.c  Project: ileben/RAVG
void main()
{
  //Copy the input vertex coordinates to three output variables
  //These stay the same for every output vertex (i.e. they are constant across the whole primitive)
  quad0 = gl_in[0].gl_Position.xy;
  quad1 = gl_in[1].gl_Position.xy;
  quad2 = gl_in[2].gl_Position.xy;

  //Get object pointer and grid info
  int *ptrObj = ptrObjects + objectId * NODE_SIZE_OBJINFO;
  ivec2 objGridOrigin = ivec2( ptrObj[0], ptrObj[1] );

  //Find the bounds of the input triangle
  vec2 gridMin = min( quad0, min( quad1, quad2 ));
  vec2 gridMax = max( quad0, max( quad1, quad2 ));
  
  //Transform and round to grid space
  gridMin = floor( (gridMin - gridOrigin) / cellSize );
  gridMax = ceil( (gridMax - gridOrigin) / cellSize );
  quadMin = ivec2( gridMin );

  //Extend left side to object boundary
  gridMin.x = objGridOrigin.x;

  //Transform into [-1,1] normalized coordinates (glViewport will transform back)
  gridMin = (gridMin / gridSize) * 2.0 - vec2(1.0);
  gridMax = (gridMax / gridSize) * 2.0 - vec2(1.0);
  
  //Emit triangle strip forming the bounding box
  gl_Position = vec4( gridMin.x, gridMin.y, 0, 1 ); EmitVertex();
  gl_Position = vec4( gridMax.x, gridMin.y, 0, 1 ); EmitVertex();
  gl_Position = vec4( gridMin.x, gridMax.y, 0, 1 ); EmitVertex();
  gl_Position = vec4( gridMax.x, gridMax.y, 0, 1 ); EmitVertex();
  EndPrimitive();
}
Code example #2
File: encode_aux_quads.geom.c  Project: ileben/RAVG
void main()
{
  //Copy the input vertex coordinates to three output variables
  //These stay the same for every output vertex (i.e. they are constant across the whole primitive)
  quad0 = gl_in[0].gl_Position.xy;
  quad1 = gl_in[1].gl_Position.xy;
  quad2 = gl_in[2].gl_Position.xy;

  //Find the bounds of the input triangle
  vec2 pmin = min( quad0, min( quad1, quad2 ));
  vec2 pmax = max( quad0, max( quad1, quad2 ));
  
  //Transform and round to grid space
  pmin = floor( (pmin - gridOrigin) / cellSize );
  pmax = ceil( (pmax - gridOrigin) / cellSize );

  //Transform into [-1,1] normalized coordinates (glViewport will transform back)
  pmin = (pmin / gridSize) * 2.0 - vec2(1.0);
  pmax = (pmax / gridSize) * 2.0 - vec2(1.0);
  
  //Emit triangle strip forming the bounding box
  gl_Position = vec4( pmin.x, pmin.y, 0, 1 ); EmitVertex();
  gl_Position = vec4( pmax.x, pmin.y, 0, 1 ); EmitVertex();
  gl_Position = vec4( pmin.x, pmax.y, 0, 1 ); EmitVertex();
  gl_Position = vec4( pmax.x, pmax.y, 0, 1 ); EmitVertex();
  EndPrimitive();
}
Code example #3
File: depthmapRenderer.cpp  Project: Cerarus/v4r
 void main(){\
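     // Reserve a unique face index from the atomic counter, emit the transformed triangle,\n\
     // then record its surface area and its projected area in pixels.\n\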
     unsigned int ind = atomicCounterIncrement(faceCount);\n\
     index= ind+1;\n\
     gl_Position=project(transformation*gl_in[0].gl_Position);\n\
     vec4 p1=transformation*gl_in[0].gl_Position;\n\
     vec2 pp1=gl_Position.xy;\n\
     z=-(transformation*gl_in[0].gl_Position).z;\n\
     color=colorIn[0];\n\
     EmitVertex();\n\
     gl_Position=project(transformation*gl_in[1].gl_Position);\n\
     vec2 pp2=gl_Position.xy;\n\
     vec4 p2=transformation*gl_in[1].gl_Position;\n\
     z=-(transformation*gl_in[1].gl_Position).z;\n\
     color=colorIn[1];\n\
     EmitVertex();\n\
     gl_Position=project(transformation*gl_in[2].gl_Position);\n\
     vec4 p3=transformation*gl_in[2].gl_Position;\n\
     vec2 pp3=gl_Position.xy;\n\
     z=-(transformation*gl_in[2].gl_Position).z;\n\
     color=colorIn[2];\n\
     EmitVertex();\n\
     //calc triangle surface area\n\
     float A= length(cross(vec3(p1)/p1.w-vec3(p3)/p3.w,vec3(p2)/p2.w-vec3(p3)/p3.w));//TODO: Change this to correct pixel area calculation\n\
     vec3 a=vec3((pp2.x-pp1.x)*float(viewportRes.x),(pp2.y-pp1.y)*float(viewportRes.y),0)*0.5;\n\
     vec3 b=vec3((pp3.x-pp1.x)*float(viewportRes.x),(pp3.y-pp1.y)*float(viewportRes.y),0)*0.5;\n\
     float Apix=length(cross(a,b))*0.5;\n\
     AnPixCnt[ind]=vec2(A,Apix);\n\
 }";
Code example #4
	void main()\n\
	{\n\
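	    // For each vertex, compute its window-space distance to the opposite edge and pass it on in out_distance_to_edge\n\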
	    vec2 v[3];\n\
	    vec2 p0 = WindowSize * gl_in[0].gl_Position.xy/gl_in[0].gl_Position.w;\n\
	    vec2 p1 = WindowSize * gl_in[1].gl_Position.xy/gl_in[1].gl_Position.w;\n\
	    vec2 p2 = WindowSize * gl_in[2].gl_Position.xy/gl_in[2].gl_Position.w;\n\
	    v[0] = p2-p1;\n\
	    v[1] = p2-p0;\n\
	    v[2] = p1-p0;\n\
	    float area = abs(v[1].x*v[2].y - v[1].y * v[2].x);\n\
	    for(int i = 0; i < 3; i++)\n\
	    {\n\
	        out_st = in_st[i];\n\
	        out_stp = in_stp[i];\n\
	        out_normal = in_normal[i];\n\
	        out_color = in_color[i];\n\
	        out_position = in_position[i];\n\
	        float dist = area/length(v[i]);\n\
	        out_distance_to_edge = vec3(0,0,0);\n\
	        out_distance_to_edge[i] = dist;\n\
	        gl_Position = gl_in[i].gl_Position;\n\
	        GS(i);\n\
	        EmitVertex();\n\
	    }\n\
}";
Code example #5
File: ptree_leaf_gs.c  Project: mickymuis/folia
void
emitAndMult( vec3 v, vec3 n, vec2 t ) {
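	// Transform the normal by the model matrix (upper 3x3) and the position by the full model-view-projection, then emit the vertex.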
	gs_out.normal = mat3(mat_model) * n;
	gs_out.texCoords = t;
	gl_Position = mat_projection * mat_view * mat_model * vec4( v, 1 );
	EmitVertex();
}
Code example #6
void main(){

	//input line coordinates
	vec2 p0 = gl_PositionIn[0].xy; 
	vec2 p1 = gl_PositionIn[1].xy;

	//generate 50 lines and distort with perlin noise
	for (int i = 0; i < 50; i++){
		float x0 = snoise(vec4(p0 *3.0, time * 0.5 + 12.4, i));
		float y0 = snoise( vec4( p0 * 3.0, time * 0.5 + 304.2, i ) );
		float x1 = snoise( vec4( p1 * 3.0, time * 0.5 + 20.1,  i ) );
		float y1 = snoise( vec4( p1 * 3.0, time * 0.5 + 43.6,  i ) );
		vec2 q0 = p0 + vec2( x0, y0 ) * 25.0;
		vec2 q1 = p1 + vec2( x1, y1 ) * 25.0;

		//calculate color
		vec4 color = vec4(0.0);

		vec2 delta = q1 - q0;
		float len = distance( delta, vec2( 0.0 ) );
		if ( len > 0 ) {
			int n = int( len ) + 1;
			delta /= n;
			for (int j = 0; j < n; j++) {
				color += texture2DRect( texture, q0 + delta * j );
			}
			color /= n;
		}

		color.a *= 0.4;	//Make the output color semi-transparent

		//output line
		gl_Position = gl_ModelViewProjectionMatrix * vec4(q0, 0.0, 1.0);	
		gl_FrontColor = color; 	//0 - left color, 1 - right color
		EmitVertex();

		gl_Position = gl_ModelViewProjectionMatrix * vec4(q1, 0.0, 1.0);	
		gl_FrontColor = color; 	//0 - left color, 1 - right color
		EmitVertex();
		EndPrimitive();
	}
		
}
Code example #7
static void EndPrimitive(ShaderCode& out, const geometry_shader_uid_data* uid_data, APIType ApiType)
{
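  // In wireframe mode, close the line loop by re-emitting the first vertex before ending the primitive.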
  if (uid_data->wireframe)
    EmitVertex(out, uid_data, "first", ApiType);

  if (ApiType == APIType::OpenGL || ApiType == APIType::Vulkan)
    out.Write("\tEndPrimitive();\n");
  else
    out.Write("\toutput.RestartStrip();\n");
}
Code example #8
File: geomtess.c  Project: zx-zheng/visShadow
void main(){
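  // Emit the input primitive once per light, sending each copy to its own layer of the layered render target.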
  for(int layer = 0; layer < maxlight; layer++){
    gl_Layer = layer;
    for(int i = 0; i < gl_in.length(); i++){
      Geom.worldpos = lightsview[layer] * gl_in[i].gl_Position;
      gl_Position = lightsproj[layer] * Geom.worldpos;
      Geom.z = gl_Position.z/gl_Position.w;
      EmitVertex();
    }
    EndPrimitive();
  }
}
Code example #9
void drawPatch(GLfloat *ptrData, GLuint &ptrIndex, GLfloat p1[3], GLfloat p2[3], GLfloat p3[3], int level, GLuint *count)
{
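	// Recursively subdivide the triangle, projecting edge midpoints back onto the unit sphere.
	// At the deepest level, either count the vertices (when 'count' is non-null) or emit
	// interleaved position/normal data (identical for a unit sphere).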
	int i;
	if (level > 0) {
		GLfloat q1[3],q2[3],q3[3];		 // sub-vertices
		for (i=0; i<3; i++) {
			q1[i] = 0.5f*(p1[i]+p2[i]);
			q2[i] = 0.5f*(p2[i]+p3[i]);
			q3[i] = 0.5f*(p3[i]+p1[i]);
		}
		GLfloat length1 = (GLfloat)(1.0/sqrt(q1[0]*q1[0]+q1[1]*q1[1]+q1[2]*q1[2]));
		GLfloat length2 = (GLfloat)(1.0/sqrt(q2[0]*q2[0]+q2[1]*q2[1]+q2[2]*q2[2]));
		GLfloat length3 = (GLfloat)(1.0/sqrt(q3[0]*q3[0]+q3[1]*q3[1]+q3[2]*q3[2]));
		for (i=0; i<3; i++) {
			q1[i] *= length1;
			q2[i] *= length2;
			q3[i] *= length3;
		}
		drawPatch(ptrData,ptrIndex,p1,q1,q3,level-1,count);
		drawPatch(ptrData,ptrIndex,q1,p2,q2,level-1,count);
		drawPatch(ptrData,ptrIndex,q1,q2,q3,level-1,count);
		drawPatch(ptrData,ptrIndex,q3,q2,p3,level-1,count);
	} else {
		if( count ) {
			*count += 3;
		} else {
			EmitVertex(ptrData,ptrIndex,p1[0],p1[1],p1[2]); // vertex
			EmitVertex(ptrData,ptrIndex,p1[0],p1[1],p1[2]); // normal
			EmitVertex(ptrData,ptrIndex,p2[0],p2[1],p2[2]); // vertex
			EmitVertex(ptrData,ptrIndex,p2[0],p2[1],p2[2]); // normal
			EmitVertex(ptrData,ptrIndex,p3[0],p3[1],p3[2]); // vertex
			EmitVertex(ptrData,ptrIndex,p3[0],p3[1],p3[2]); // normal
		}
	}
}
Code example #10
void main(void)                                                                   \n\
{                                                                                 \n\
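    // Extrude the segment into a quad by offsetting both endpoints along the first vertex's normal scaled by its thickness\n\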
    int i = 0;                                                                \n\
    transform( gl_Position, gl_PositionIn[i], vOrder[i] );                    \n\
    EmitVertex();                                                             \n\
                                                                              \n\
    transform( gl_Position, gl_PositionIn[i+1], vOrder[i+1] );                \n\
    EmitVertex();                                                             \n\
                                                                              \n\
    vec4 diff = vec4( vNormal[i]*vThickness[i], gl_PositionIn[i+1].w );       \n\
    transform( gl_Position, gl_PositionIn[i+1] + diff,                        \n\
                vOrder[i+1] );                                                \n\
    EmitVertex();                                                             \n\
                                                                              \n\
    transform( gl_Position, gl_PositionIn[i] + diff,                          \n\
                vOrder[i] );                                                  \n\
    EmitVertex();                                                             \n\
                                                                              \n\
    transform( gl_Position, gl_PositionIn[i], vOrder[i] );                    \n\
    EmitVertex();                                                             \n\
    EndPrimitive();                                                           \n\
}                                                                                 \n";
Code example #11
File: geometry.c  Project: riskybacon/spikes
void main() {

   // Pick one of the endpoints, and translate
   // it to the origin in the x,y plane.
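   // Note: GLSL mat4 constructors are filled in column-major order, so the offsets
   // written here end up in the matrix's bottom row rather than its last column.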
   mat4 trans = 
      mat4 (1, 0, 0, -gl_in[0].gl_Position.x,
            0, 1, 0, -gl_in[0].gl_Position.y,
            0, 0, 1, 0,
            0, 0, 0, 1);
   
   // Inverse translation
   mat4 transInv =
      mat4 (1, 0, 0, gl_in[0].gl_Position.x,
            0, 1, 0, gl_in[0].gl_Position.y,
            0, 0, 1, 0,
            0, 0, 0, 1);
   
   vec4 pos[6];
   pos[2] = gl_in[0].gl_Position; // At (x,y) origin
   pos[3] = gl_in[1].gl_Position; // Not at (x,y) origin
   
   
   float width = 0.5;
   
   mat4 moveUp = 
      mat4(1, 0, 0, 0,
           0, 1, 0, width,
           0, 0, 1, 0,
           0, 0, 0, 1);

   mat4 moveDown = 
      mat4(1, 0, 0, 0,
           0, 1, 0, -width,
           0, 0, 1, 0,
           0, 0, 0, 1);

#if 0
   // Rotate about the z-axis to get the line
   // into the x / z plane. Actually, we know
   // what the end result of that rotation will
   // look like without performing the rotation,
   // all that is need to be known is the length
   // of the line. 
   //
   // What is needed is the inverse of the 
   // rotation matrix to rotate the new points
   // back into their original basis
   float cosTheta;
   float sinTheta;
   float d = sqrt(pos[3].x * pos[3].x + pos[3].y * pos[3].y);

   pos[3].y = 0;

   if(pos[1].x >= 0 && pos[1].y >= 0) {
      sinTheta = pos[1].y / d;
      cosTheta = pos[1].x / d;
      pos[3].x = d;
   }

   if(pos[1].x < 0 && pos[1].y >= 0) {
      // Make sin(theta) -sin(theta) for
      // the inv rotation. -cos(theta) = cos(theta)
      sinTheta = -pos[1].y / d;
      cosTheta = -pos[1].x / d;
      pos[3].x = -d;
   }

   if(pos[1].x < 0 && pos[1].y < 0) {
      sinTheta = -pos[1].y / d;
      cosTheta = -pos[1].x / d;
      pos[3].x = -d;
      pos[3].y = 0;
   }

   if(pos[1].x >= 0 && pos[1].y < 0) {
      // Make sin(theta) -sin(theta) for
      // the inv rotation. -cos(theta) = cos(theta)
      sinTheta = pos[1].y / d;
      cosTheta = pos[1].x / d;
      pos[3].x = d;
   }

   mat4 rotInv =
      mat4 (cosTheta, sinTheta, 0, 0,
            sinTheta, cosTheta, 0, 0,
            0,        0,        1, 0,
            0,        0,        0, 1);
   
   vec4 width = vec4(0, 0.5, 0, 0) * 0.1;
   
   pos[0] = pos[2] + width;
   pos[4] = pos[2] - width;
   pos[1] = pos[3] + width;
   pos[5] = pos[3] - width;
   
   // Emit transformed vertices
   for(int i = 0; i < 6; i++)
   {
      // Transform the vertex into the view plane
      gl_Position = proj * transInv * pos[i];
      // Set the out color for this vertex
      vertexColor = geomColor[i];
      // Emit the vertex
      EmitVertex();
   }
#endif
   
   gl_Position = proj * transInv * pos[2];
   vertexColor = geomColor[0];
   EmitVertex();
   gl_Position = proj * transInv * pos[3];
   vertexColor = geomColor[1];
   EmitVertex();
   
   // Done composing the primitive
   EndPrimitive();
}
Code example #12
ShaderCode GenerateGeometryShaderCode(APIType ApiType, const geometry_shader_uid_data* uid_data)
{
  ShaderCode out;
  // Non-uid template parameters will write to the dummy data (=> gets optimized out)

  const unsigned int vertex_in = uid_data->primitive_type + 1;
  unsigned int vertex_out = uid_data->primitive_type == PRIMITIVE_TRIANGLES ? 3 : 4;

  if (uid_data->wireframe)
    vertex_out++;

  if (ApiType == APIType::OpenGL || ApiType == APIType::Vulkan)
  {
    // Insert layout parameters
    if (g_ActiveConfig.backend_info.bSupportsGSInstancing)
    {
      out.Write("layout(%s, invocations = %d) in;\n", primitives_ogl[uid_data->primitive_type],
                uid_data->stereo ? 2 : 1);
      out.Write("layout(%s_strip, max_vertices = %d) out;\n",
                uid_data->wireframe ? "line" : "triangle", vertex_out);
    }
    else
    {
      out.Write("layout(%s) in;\n", primitives_ogl[uid_data->primitive_type]);
      out.Write("layout(%s_strip, max_vertices = %d) out;\n",
                uid_data->wireframe ? "line" : "triangle",
                uid_data->stereo ? vertex_out * 2 : vertex_out);
    }
  }

  out.Write("%s", s_lighting_struct);

  // uniforms
  if (ApiType == APIType::OpenGL || ApiType == APIType::Vulkan)
    out.Write("UBO_BINDING(std140, 3) uniform GSBlock {\n");
  else
    out.Write("cbuffer GSBlock {\n");

  out.Write("\tfloat4 " I_STEREOPARAMS ";\n"
            "\tfloat4 " I_LINEPTPARAMS ";\n"
            "\tint4 " I_TEXOFFSET ";\n"
            "};\n");

  out.Write("struct VS_OUTPUT {\n");
  GenerateVSOutputMembers<ShaderCode>(out, ApiType, uid_data->numTexGens, uid_data->pixel_lighting,
                                      "");
  out.Write("};\n");

  if (ApiType == APIType::OpenGL || ApiType == APIType::Vulkan)
  {
    if (g_ActiveConfig.backend_info.bSupportsGSInstancing)
      out.Write("#define InstanceID gl_InvocationID\n");

    out.Write("VARYING_LOCATION(0) in VertexData {\n");
    GenerateVSOutputMembers<ShaderCode>(
        out, ApiType, uid_data->numTexGens, uid_data->pixel_lighting,
        GetInterpolationQualifier(uid_data->msaa, uid_data->ssaa, true, true));
    out.Write("} vs[%d];\n", vertex_in);

    out.Write("VARYING_LOCATION(0) out VertexData {\n");
    GenerateVSOutputMembers<ShaderCode>(
        out, ApiType, uid_data->numTexGens, uid_data->pixel_lighting,
        GetInterpolationQualifier(uid_data->msaa, uid_data->ssaa, false, true));

    if (uid_data->stereo)
      out.Write("\tflat int layer;\n");

    out.Write("} ps;\n");

    out.Write("void main()\n{\n");
  }
  else  // D3D
  {
    out.Write("struct VertexData {\n");
    out.Write("\tVS_OUTPUT o;\n");

    if (uid_data->stereo)
      out.Write("\tuint layer : SV_RenderTargetArrayIndex;\n");

    out.Write("};\n");

    if (g_ActiveConfig.backend_info.bSupportsGSInstancing)
    {
      out.Write("[maxvertexcount(%d)]\n[instance(%d)]\n", vertex_out, uid_data->stereo ? 2 : 1);
      out.Write("void main(%s VS_OUTPUT o[%d], inout %sStream<VertexData> output, in uint "
                "InstanceID : SV_GSInstanceID)\n{\n",
                primitives_d3d[uid_data->primitive_type], vertex_in,
                uid_data->wireframe ? "Line" : "Triangle");
    }
    else
    {
      out.Write("[maxvertexcount(%d)]\n", uid_data->stereo ? vertex_out * 2 : vertex_out);
      out.Write("void main(%s VS_OUTPUT o[%d], inout %sStream<VertexData> output)\n{\n",
                primitives_d3d[uid_data->primitive_type], vertex_in,
                uid_data->wireframe ? "Line" : "Triangle");
    }

    out.Write("\tVertexData ps;\n");
  }

  if (uid_data->primitive_type == PRIMITIVE_LINES)
  {
    if (ApiType == APIType::OpenGL || ApiType == APIType::Vulkan)
    {
      out.Write("\tVS_OUTPUT start, end;\n");
      AssignVSOutputMembers(out, "start", "vs[0]", uid_data->numTexGens, uid_data->pixel_lighting);
      AssignVSOutputMembers(out, "end", "vs[1]", uid_data->numTexGens, uid_data->pixel_lighting);
    }
    else
    {
      out.Write("\tVS_OUTPUT start = o[0];\n");
      out.Write("\tVS_OUTPUT end = o[1];\n");
    }

    // GameCube/Wii's line drawing algorithm is a little quirky. It does not
    // use the correct line caps. Instead, the line caps are vertical or
    // horizontal depending on the slope of the line.
    out.Write("\tfloat2 offset;\n"
              "\tfloat2 to = abs(end.pos.xy / end.pos.w - start.pos.xy / start.pos.w);\n"
              // FIXME: What does real hardware do when line is at a 45-degree angle?
              // FIXME: Lines aren't drawn at the correct width. See Twilight Princess map.
              "\tif (" I_LINEPTPARAMS ".y * to.y > " I_LINEPTPARAMS ".x * to.x) {\n"
              // Line is more tall. Extend geometry left and right.
              // Lerp LineWidth/2 from [0..VpWidth] to [-1..1]
              "\t\toffset = float2(" I_LINEPTPARAMS ".z / " I_LINEPTPARAMS ".x, 0);\n"
              "\t} else {\n"
              // Line is more wide. Extend geometry up and down.
              // Lerp LineWidth/2 from [0..VpHeight] to [1..-1]
              "\t\toffset = float2(0, -" I_LINEPTPARAMS ".z / " I_LINEPTPARAMS ".y);\n"
              "\t}\n");
  }
  else if (uid_data->primitive_type == PRIMITIVE_POINTS)
  {
    if (ApiType == APIType::OpenGL || ApiType == APIType::Vulkan)
    {
      out.Write("\tVS_OUTPUT center;\n");
      AssignVSOutputMembers(out, "center", "vs[0]", uid_data->numTexGens, uid_data->pixel_lighting);
    }
    else
    {
      out.Write("\tVS_OUTPUT center = o[0];\n");
    }

    // Offset from center to upper right vertex
    // Lerp PointSize/2 from [0,0..VpWidth,VpHeight] to [-1,1..1,-1]
    out.Write("\tfloat2 offset = float2(" I_LINEPTPARAMS ".w / " I_LINEPTPARAMS
              ".x, -" I_LINEPTPARAMS ".w / " I_LINEPTPARAMS ".y) * center.pos.w;\n");
  }

  if (uid_data->stereo)
  {
    // If the GPU supports geometry shader instancing we don't need a for loop and can simply use the
    // invocation identifier to determine which layer we're rendering.
    if (g_ActiveConfig.backend_info.bSupportsGSInstancing)
      out.Write("\tint eye = InstanceID;\n");
    else
      out.Write("\tfor (int eye = 0; eye < 2; ++eye) {\n");
  }

  if (uid_data->wireframe)
    out.Write("\tVS_OUTPUT first;\n");

  out.Write("\tfor (int i = 0; i < %d; ++i) {\n", vertex_in);

  if (ApiType == APIType::OpenGL || ApiType == APIType::Vulkan)
  {
    out.Write("\tVS_OUTPUT f;\n");
    AssignVSOutputMembers(out, "f", "vs[i]", uid_data->numTexGens, uid_data->pixel_lighting);

    if (g_ActiveConfig.backend_info.bSupportsDepthClamp &&
        DriverDetails::HasBug(DriverDetails::BUG_BROKENCLIPDISTANCE))
    {
      // On certain GPUs we have to consume the clip distance from the vertex shader
      // or else the other vertex shader outputs will get corrupted.
      out.Write("\tf.clipDist0 = gl_in[i].gl_ClipDistance[0];\n");
      out.Write("\tf.clipDist1 = gl_in[i].gl_ClipDistance[1];\n");
    }
  }
  else
  {
    out.Write("\tVS_OUTPUT f = o[i];\n");
  }

  if (uid_data->stereo)
  {
    // Select the output layer
    out.Write("\tps.layer = eye;\n");
    if (ApiType == APIType::OpenGL || ApiType == APIType::Vulkan)
      out.Write("\tgl_Layer = eye;\n");

    // For stereoscopy add a small horizontal offset in Normalized Device Coordinates proportional
    // to the depth of the vertex. We retrieve the depth value from the w-component of the projected
    // vertex which contains the negated z-component of the original vertex.
    // For negative parallax (out-of-screen effects) we subtract a convergence value from
    // the depth value. As a result, objects closer than the convergence distance
    // appear to be in front of the screen.
    // This formula is based on page 13 of the "Nvidia 3D Vision Automatic, Best Practices Guide"
    out.Write("\tfloat hoffset = (eye == 0) ? " I_STEREOPARAMS ".x : " I_STEREOPARAMS ".y;\n");
    out.Write("\tf.pos.x += hoffset * (f.pos.w - " I_STEREOPARAMS ".z);\n");
  }

  if (uid_data->primitive_type == PRIMITIVE_LINES)
  {
    out.Write("\tVS_OUTPUT l = f;\n"
              "\tVS_OUTPUT r = f;\n");

    out.Write("\tl.pos.xy -= offset * l.pos.w;\n"
              "\tr.pos.xy += offset * r.pos.w;\n");

    out.Write("\tif (" I_TEXOFFSET "[2] != 0) {\n");
    out.Write("\tfloat texOffset = 1.0 / float(" I_TEXOFFSET "[2]);\n");

    for (unsigned int i = 0; i < uid_data->numTexGens; ++i)
    {
      out.Write("\tif (((" I_TEXOFFSET "[0] >> %d) & 0x1) != 0)\n", i);
      out.Write("\t\tr.tex%d.x += texOffset;\n", i);
    }
    out.Write("\t}\n");

    EmitVertex(out, uid_data, "l", ApiType, true);
    EmitVertex(out, uid_data, "r", ApiType);
  }
  else if (uid_data->primitive_type == PRIMITIVE_POINTS)
  {
    out.Write("\tVS_OUTPUT ll = f;\n"
              "\tVS_OUTPUT lr = f;\n"
              "\tVS_OUTPUT ul = f;\n"
              "\tVS_OUTPUT ur = f;\n");

    out.Write("\tll.pos.xy += float2(-1,-1) * offset;\n"
              "\tlr.pos.xy += float2(1,-1) * offset;\n"
              "\tul.pos.xy += float2(-1,1) * offset;\n"
              "\tur.pos.xy += offset;\n");

    out.Write("\tif (" I_TEXOFFSET "[3] != 0) {\n");
    out.Write("\tfloat2 texOffset = float2(1.0 / float(" I_TEXOFFSET
              "[3]), 1.0 / float(" I_TEXOFFSET "[3]));\n");

    for (unsigned int i = 0; i < uid_data->numTexGens; ++i)
    {
      out.Write("\tif (((" I_TEXOFFSET "[1] >> %d) & 0x1) != 0) {\n", i);
      out.Write("\t\tll.tex%d.xy += float2(0,1) * texOffset;\n", i);
      out.Write("\t\tlr.tex%d.xy += texOffset;\n", i);
      out.Write("\t\tur.tex%d.xy += float2(1,0) * texOffset;\n", i);
      out.Write("\t}\n");
    }
    out.Write("\t}\n");

    EmitVertex(out, uid_data, "ll", ApiType, true);
    EmitVertex(out, uid_data, "lr", ApiType);
    EmitVertex(out, uid_data, "ul", ApiType);
    EmitVertex(out, uid_data, "ur", ApiType);
  }
  else
  {
    EmitVertex(out, uid_data, "f", ApiType, true);
  }

  out.Write("\t}\n");

  EndPrimitive(out, uid_data, ApiType);

  if (uid_data->stereo && !g_ActiveConfig.backend_info.bSupportsGSInstancing)
    out.Write("\t}\n");

  out.Write("}\n");

  return out;
}
Code example #13
File: Grid.cpp  Project: fxtentacle/game-of-life-3d
 inline void EmitPoint(float cx, float cy, float cz, float siz) {
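     // Expand one grid cell into a cube: map the cell's [0,1] texture-space center to [-16,16]
     // world space, then emit one 4-vertex triangle strip per face with its outward normal.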
     Vec3 centerTex(cx,cy,cz);
     Vec3 centerPosition = centerTex * 32.0 - Vec3(16.0);
     siz*=0.5;
     
     normal = Vec3(0,0,-1, 0);
     texFace = Vec3(0,0,0,0);
     pos = centerPosition + siz* Vec3(-1, -1, -1);
     
     EmitVertex();
     texFace = Vec3(1,0,0,0);
     pos = centerPosition + siz* Vec3(1, -1, -1);
     
     EmitVertex();
     texFace = Vec3(0,1,0,0);
     pos = centerPosition + siz* Vec3(-1, 1, -1);
     
     EmitVertex();
     texFace = Vec3(1,1,0,0);
     pos = centerPosition + siz* Vec3(1, 1, -1);
     
     EmitVertex();
     EndPrimitive();
     
     normal = Vec3(0,0,1, 0);
     texFace = Vec3(0,0,0,0);
     pos = centerPosition + siz* Vec3(-1, -1, 1);
     
     EmitVertex();
     texFace = Vec3(1,0,0,0);
     pos = centerPosition + siz* Vec3(1, -1, 1);
     
     EmitVertex();
     texFace = Vec3(0,1,0,0);
     pos = centerPosition + siz* Vec3(-1, 1, 1);
     
     EmitVertex();
     texFace = Vec3(1,1,0,0);
     pos = centerPosition + siz* Vec3(1, 1, 1);
     
     EmitVertex();
     EndPrimitive();
     
     
     
     normal = Vec3(-1,0,0, 0);
     texFace = Vec3(0,0,0,0);
     pos = centerPosition + siz* Vec3(-1, -1, -1);
     
     EmitVertex();
     texFace = Vec3(1,0,0,0);
     pos = centerPosition + siz* Vec3(-1, -1, 1);
     
     EmitVertex();
     texFace = Vec3(0,1,0,0);
     pos = centerPosition + siz* Vec3(-1, 1, -1);
     
     EmitVertex();
     texFace = Vec3(1,1,0,0);
     pos = centerPosition + siz* Vec3(-1, 1, 1);
     
     EmitVertex();
     EndPrimitive();
     
     normal = Vec3(1,0,0, 0);
     texFace = Vec3(0,0,0,0);
     pos = centerPosition + siz* Vec3(1, -1, -1);
     
     EmitVertex();
     texFace = Vec3(1,0,0,0);
     pos = centerPosition + siz* Vec3(1, -1, 1);
     
     EmitVertex();
     texFace = Vec3(0,1,0,0);
     pos = centerPosition + siz* Vec3(1, 1, -1);
     
     EmitVertex();
     texFace = Vec3(1,1,0,0);
     pos = centerPosition + siz* Vec3(1, 1, 1);
     
     EmitVertex();
     EndPrimitive();
     
     
     
     normal = Vec3(0,1,0, 0);
     texFace = Vec3(0,0,0,0);
     pos = centerPosition + siz* Vec3(-1, 1, -1);
     
     EmitVertex();
     texFace = Vec3(1,0,0,0);
     pos = centerPosition + siz* Vec3(-1, 1, 1);
     
     EmitVertex();
     texFace = Vec3(0,1,0,0);
     pos = centerPosition + siz* Vec3(1, 1, -1);
     
     EmitVertex();
     texFace = Vec3(1,1,0,0);
     pos = centerPosition + siz* Vec3(1, 1, 1);
     
     EmitVertex();
     EndPrimitive();
     
     
     normal = Vec3(0,-1,0, 0);
     texFace = Vec3(0,0,0,0);
     pos = centerPosition + siz* Vec3(-1, -1, -1);
     
     EmitVertex();
     texFace = Vec3(1,0,0,0);
     pos = centerPosition + siz* Vec3(-1, -1, 1);
     
     EmitVertex();
     texFace = Vec3(0,1,0,0);
     pos = centerPosition + siz* Vec3(1, -1, -1);
     
     EmitVertex();
     texFace = Vec3(1,1,0,0);
     pos = centerPosition + siz* Vec3(1, -1, 1);
     
     EmitVertex();
     EndPrimitive();
 }
Code example #14
void main()
{
    mat4 matModelViewProjection = matrixCameraToClip * matrixModelToCamera;

    // how large (in meters) is one cell?
    vec3 cellSize = vec3(
      (boundingBoxMax.x - boundingBoxMin.x) / gridCellCount.x,
      (boundingBoxMax.y - boundingBoxMin.y) / gridCellCount.y,
      (boundingBoxMax.z - boundingBoxMin.z) / gridCellCount.z
    );

    // Given the cell hash (=gl_PrimitiveIDIn), what's the 3D grid coordinate of the cell's center?
    // This is the reverse of particleskernel.cu -> calcGridHash(int3 gridCell).
    ivec3 gridCellCoordinate = ivec3(
      floor(mod(gl_PrimitiveIDIn, gridCellCount.x)),
      floor(mod(gl_PrimitiveIDIn, gridCellCount.x * gridCellCount.y) / gridCellCount.x),
      floor(mod(gl_PrimitiveIDIn, gridCellCount.x * gridCellCount.y * gridCellCount.z) / (gridCellCount.x * gridCellCount.y))
    );

    vec3 posCenterOfCell = vec3(
      boundingBoxMin.x + (cellSize.x * gridCellCoordinate.x) + (cellSize.x / 2.0),
      boundingBoxMin.y + (cellSize.y * gridCellCoordinate.y) + (cellSize.y / 2.0),
      boundingBoxMin.z + (cellSize.z * gridCellCoordinate.z) + (cellSize.z / 2.0)
    );

    vec4 cameraPosition = inverse(matrixModelToCamera) * vec4(0,0,0,1);

    vec3 toCamera = normalize(cameraPosition.xyz - posCenterOfCell);

    float quadSize = min(min(cellSize.x/2, cellSize.y/2),cellSize.z/2) * quadSizeFactor;

    vec3 upWorld = vec3(0.0, 1.0, 0.0);

    vec3 right = normalize(-cross(toCamera, upWorld)) * quadSize;
    vec3 up = normalize(cross(toCamera, normalize(-right))) * quadSize;

    // Don't draw completely transparent boxes. For some reason, this also fixes the problem of the quads disappearing at certain viewing angles.
    if(cellvalue[0] == 0.0) return;
    
    float alpha = cellvalue[0];
    if(alphaExponentiation != 1.0) alpha = pow(alpha, alphaExponentiation);
    if(alpha < 1.0 && alpha > 0.993) alpha = 0.4; // show dilated cells with obvious difference.
    if(alpha > 1.0) alpha = 1.0;
    alpha *= alphaMultiplication;
    //if(cellvalue[0] > 0) alpha = 1.0;

    vec4 outColor = fixedColor;
    outColor.a = alpha;

    // bottom left
    posCenterOfCell -= right;
    posCenterOfCell -= up;
    gl_Position = matModelViewProjection * vec4(posCenterOfCell, 1.0);
    //texureCoordinate = vec2(-1.0, -1.0);
    colorGS_to_FS = outColor;
    EmitVertex();

    // top left
    posCenterOfCell += up*2;
    gl_Position = matModelViewProjection * vec4(posCenterOfCell, 1.0);
    //texureCoordinate = vec2(-1.0, 1.0);
    colorGS_to_FS = outColor;
    EmitVertex();

    // bottom right
    posCenterOfCell -= up*2;
    posCenterOfCell += right * 2;
    gl_Position = matModelViewProjection * vec4(posCenterOfCell, 1.0);
    //texureCoordinate = vec2(1.0, -1.0);
    colorGS_to_FS = outColor;
    EmitVertex();

    // top right
    posCenterOfCell += up*2;
    gl_Position = matModelViewProjection * vec4(posCenterOfCell, 1.0);
    //texureCoordinate = vec2(1.0, 1.0);
    colorGS_to_FS = outColor;
    EmitVertex();

    EndPrimitive();
}
Code example #15
File: geo.c  Project: cookiehunter/Engine
void emit(vec4 vertex) {
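	// Write the given clip-space position to gl_Position and emit the vertex.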
	gl_Position = vertex;
	EmitVertex();
}