Example no. 1
//--------------------------------------------------------------
void ofApp::setup(){


    ofDirectory dir;
    dir.allowExt("mov");
    dir.listDir(ofToDataPath("."));
    for (int i=0;i<dir.numFiles();i++) {
        memories.push_back(dir.getName(i));        
    }
    
    recSound.loadSound("camera memory sound 10 sec 48.wav");
    ambientSound.loadSound("ambi2.wav");
    ambientSound.setLoop(true);
    ambientSound.play();
    
    
    ofSetWindowShape(STAGE_WIDTH, STAGE_HEIGHT);
    
    ofDisableArbTex();
    
    for (int i=0;i<CAMERAS_NUMBER;i++) {
        cam[i].params.setName("cam"+ofToString(i));
        cam[i].params.add(cam[i].minEdge0.set("minEdge0", 0.0, 0.0, 1.0));
        cam[i].params.add(cam[i].maxEdge0.set("maxEdge0", 1.0, 0.0, 1.0));
        cam[i].params.add(cam[i].minEdge1.set("minEdge1", 0.0, 0.0, 2.0));
        cam[i].params.add(cam[i].maxEdge1.set("maxEdge1", 2.0, 0.0, 2.0));
        cam[i].params.add(cam[i].position.set("position", ofVec3f(0), ofVec3f(-MAX_POSITION), ofVec3f(MAX_POSITION)));
        cam[i].params.add(cam[i].cameraRotation.set("cameraRotation", ofVec3f(0), ofVec3f(-180), ofVec3f(180)));
        cam[i].params.add(cam[i].sceneRotation.set("sceneRotation", ofVec3f(0), ofVec3f(-180), ofVec3f(180)));
        
    }
    
    gui.setup("panel");
    gui.add(fps.set("fps",""));
    gui.add(ambLevel.set("ambLevel", 0.5, 0.0, 1.0));
    gui.add(recLevel.set("recLevel", 0.5, 0.0, 1.0));
    gui.add(pointSize.set("pointSize",3,1,10));
    gui.add(gridScale.set("gridScale", -2.0, -5.0, 5.0)); // ( -1 for linux64)
    gui.add(gridOffset.set("gridOffset", -1.0, -2.0, 2.0)); // ( -1 for linux64)
    for (int i=0;i<CAMERAS_NUMBER;i++) {
        gui.add(cam[i].params);
    }
    
    
    gui.add(depthScale.set("depthScale", -5, -10.0, 0.0)); // 10^-5
    gui.add(tolerance.set("tolerance", 0.1, 0.0, 1.0));
    gui.add(decay0.set("decay0", 0.9, 0.9, 1.0));
    gui.add(decay1.set("decay1", 0.9, 0.9, 1.0));
    gui.add(strobeRate.set("strobeRate", 15, 1, 30));
    gui.add(variance.set("variance", .143,0,10));
    gui.add(radius.set("radius", 7,0,20)); // fps drop above 14
    gui.add(hueRate.set("hueRate", 0.0,0.0,1.0));
    gui.add(sat.set("sat", 0.0,0.0,1.0));
    gui.add(offset.set("offset", 0.0,-0.5,0.5));
    
    gui.add(minArea.set("minArea",0.05,0,0.1));
    gui.add(maxArea.set("maxArea", 0.5, 0, 1));
    gui.add(blobDetection.set("blobDetection",false));
    gui.add(recordTime.set("recordTime",""));
    gui.add(waitTime.set("waitTime",""));
    gui.add(recordDuration.set("recordDuration",10,10,30));
    gui.add(minimumDuration.set("minimumDuration",3,0,5));
//    gui.add(freezeDuration.set("freezeDuration",3,0,5));
    gui.add(waitDuration.set("waitDuration",2,0,10));
    gui.add(idleInterval.set("idleInterval",5,2,10));
    
    gui.add(videoQueue.set("videoQueue",""));
    gui.loadFromFile("settings.xml");
    
    recorder.setPixelFormat("gray");
    
#ifdef TARGET_OSX
    
    
    //    recorder.setFfmpegLocation("~/ffmpeg");
    recorder.setVideoCodec("mpeg4");
    recorder.setVideoBitrate("800k");
    
    
   
#else
    
    recorder.setFfmpegLocation(ofFilePath::getAbsolutePath("ffmpeg"));
    
#endif
    ofxOpenNI2::init();
    vector<string> devices = ofxOpenNI2::listDevices();
    
//    if (devices.size()<2) {
//        std::exit(1);
//    }
    
    for (int i=0;i<CAMERAS_NUMBER;i++) {
        cam[i].sensor.setup(devices[i]);
        cam[i].sensor.setDepthMode(5);
        cam[i].background.allocate(cam[i].sensor.depthWidth, cam[i].sensor.depthHeight, 1);
    }
    
    
    createCloudShader(cloudShader);
    
    ofFbo::Settings s;
    s.width = STAGE_WIDTH;
    s.height = STAGE_HEIGHT;
    s.internalformat = GL_R16; // GL_R8 is not used in ofGetImageTypeFromGLType()
    
    depthFbo.allocate(s);
    camFbo.allocate(s);
    camFbo.begin();
    ofClear(0);
    camFbo.end();
    

//    createDepthBackgroundSubtractionShader(subtractShader);
//    subtractShader.begin();
//    subtractShader.setUniformTexture("bgTex", backgroundFbo.getTextureReference(), 1);
//    subtractShader.end();
    
    
    string fragment = STRINGIFY(
                                \n#version 150\n
                                
                                uniform sampler2D tex0;
                                uniform sampler2D memTex;
                                
                                
                                uniform int frameNum;
                                uniform int strobeRate;
                                uniform float decay;
                                uniform float sat;
                                uniform float hue;
                                uniform float offset;
                                
                                in vec2 texCoordVarying;
                                out vec4 fragColor;
                                
                                
                                
                                
                                void main(void) {
                                    float c = texture(tex0,texCoordVarying).r;
                                    vec3 mem = texture(memTex,texCoordVarying).rgb;
                                    
                                    bool f = (frameNum % strobeRate) == 0;
                                    
                                    
                                    float lgt = c+offset;
//                                    float hue = float(frameNum/10 % 256)/255.0;
                                    float x= (1-abs(2*lgt-1))*sat;
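                                    // hue-to-RGB ramp (HSL style): y is the piecewise-linear hue triangle, scaled by chroma x around lightness lgt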
                                    
                                    float h = fract(hue);
                                    vec3 y;
                                    y.r = abs(h * 6 - 3) - 1;
                                    y.g = 2 - abs(h * 6 - 2);
                                    y.b = 2 - abs(h * 6 - 4);
                                    
                                    vec3 col = (clamp(y,0,1)-0.5)*x+lgt;
                                    
                                    fragColor = vec4(mix(mem*decay,col,vec3(f && c>0)),1.0);
                                }
Example no. 2
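// UnitTest++-style fixture test: constructing Fraction(5, 8) should expose 5 and 8 through get_num()/get_den()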
TEST_FIXTURE(FractionTestFixture, CreateWithConstructor)
{
    Fraction fract(5, 8);
    CHECK_EQUAL(5, fract.get_num());
    CHECK_EQUAL(8, fract.get_den());
}
Example no. 3
 fract& operator= (const fract &fr) {
   numerator = fr.numerator;
   denominator = fr.denominator;
   return *this;
 }
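// classic shader-style hash: the fractional part of a large multiple of sin(n) behaves pseudo-randomly in [0, 1)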
float hash( float n )
{
    return fract(sin(n)*43758.5453123);
}
Example no. 5
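/* map a surface point to UV coordinates for cylindrical, spherical, planar or cubic projections;
   fract() applies the tiling (wTiles/hTiles) wrap */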
static void TextureUV(LWTexture *tx,LWPoint *spot,LWUV *uv, LWPoint *tnorm)
{
   float t,lon,lat;

   spot->x -= ( float ) tx->xTextureCenter;
   spot->y -= ( float ) tx->yTextureCenter;
   spot->z -= ( float ) tx->zTextureCenter;

   if ( tx->textureType == TT_CYLINDRICAL )
   {
      if ( tx->textureAxis == TA_X ) {
         xyztoh( spot->z, spot->x, -spot->y, &lon );
         t = ( float )( -spot->x / tx->xTextureSize + 0.5f );
      }
      else if ( tx->textureAxis == TA_Y ) {
         xyztoh( -spot->x, spot->y, spot->z, &lon );
         t = ( float )( -spot->y / tx->yTextureSize + 0.5f );
      }
      else {
         xyztoh( -spot->x, spot->z, -spot->y, &lon );
         t = ( float )( -spot->z / tx->zTextureSize + 0.5f );
      }

      while ( t < 0 ) t += 1.0f;

      /* --- lon is in [0, 2PI], so we need to change this
      lon = 1.0f - ( lon + PI ) / TWOPI;   [EW 6 Jun 00] --- */
      lon = 1.0f - lon / TWOPI;

      if ( tx->wTiles != 1.0f )
         lon = ( float ) fract( lon * tx->wTiles );

      uv->u = lon;
      uv->v = t;
   }

   else if ( tx->textureType == TT_SPHERICAL )
   {
      if ( tx->textureAxis == TA_X )
         xyztohp( spot->z, spot->x, -spot->y, &lon, &lat );

      else if ( tx->textureAxis == TA_Y )
         xyztohp( -spot->x, spot->y, spot->z, &lon, &lat );

      else
         xyztohp( -spot->x, spot->z, -spot->y, &lon, &lat );

      /* --- lon is in [0, 2PI], so we need to change this
      lon = 1.0f - ( lon + PI ) / TWOPI;   [EW 6 Jun 00] --- */
      lon = 1.0f - lon / TWOPI;

      lat = 0.5f - lat / PI;

      if ( tx->wTiles != 1.0f )
         lon = ( float ) fract( lon * tx->wTiles );
      if ( tx->hTiles != 1.0f )
         lat = ( float ) fract( lat * tx->hTiles );

      uv->u = ( lon );
      uv->v = ( lat );
   }

   else   // TT_CUBIC or TT_PLANAR
   {
      if ( tx->textureType == TT_CUBIC )
         tx->textureAxis = CubicTextureAxis( tnorm->x, tnorm->y, tnorm->z );

      uv->u = ( float )(( tx->textureAxis == TA_X ) ?
         ( spot->z / tx->zTextureSize ) + 0.5f :
         ( spot->x / tx->xTextureSize ) + 0.5f );

      uv->v = ( float )(( tx->textureAxis == TA_Y) ?
         ( -( spot->z / tx->zTextureSize )) + 0.5f :
         ( -( spot->y / tx->yTextureSize )) + 0.5f );
   }
}
Example no. 6
void UVMapper::getUV(XYZ &point_in, UV &uv_out)
{
	/* s, t = texture space coordinates, lon, lat = longitude and latitude space coordinates */
	float s,t,lon,lat;
	
	XYZ uvpoint = point_in;

	uvpoint -= center;	/* shift point by centerpoint */

	/* apply our rotation (if there is one) */
	if (rotation.x || rotation.y || rotation.z)
	{
		XYZ npoint;
		rotatexyz(rotation,point_in,npoint);
		uvpoint = npoint;
	}

	/* check the projection mode and map accordingly */
	switch (projection_mode)
	{
	
	case UV_PROJECTION_PLANAR:
				/* planar, just figure out the axis and return the point from that plane & apply scale */
        s = ((projection_axis == UV_AXIS_X) ? uvpoint.z / scale.z + 0.5f : -uvpoint.x / scale.x + 0.5f);
        t = ((projection_axis == UV_AXIS_Y) ? uvpoint.z / scale.z + 0.5f : uvpoint.y / scale.y + 0.5f);
        uv_out.u = fract(s);
        uv_out.v = fract(t);
		break;

	case UV_PROJECTION_CYLINDRICAL:

		/* Cylindrical is a little more tricky, we map based on the degree around the center point */
		switch (projection_axis)
		{

			case UV_AXIS_X:
				/* xyz_to_h takes the point and returns a value representing the 'unwrapped' height position of this point */
				xyz_to_h(uvpoint.z,uvpoint.x,-uvpoint.y,&lon);
				t = -uvpoint.x / scale.x + 0.5f;
				break;

			case UV_AXIS_Y:
				xyz_to_h(-uvpoint.x,uvpoint.y,uvpoint.z,&lon);
				t = -uvpoint.y / scale.y + 0.5f;
				break;

			case UV_AXIS_Z:
				xyz_to_h(-uvpoint.x,uvpoint.z,-uvpoint.y,&lon);
				t = -uvpoint.z / scale.z + 0.5f;
				break;

		}

		/* convert it from radian space to texture space 0 to 1 * wrap, TWO_PI = 360 degrees */
		lon = 1.0f - lon / M_TWO_PI;

		if (wrap_w_count != 1.0) lon = fract(lon) * wrap_w_count;
		uv_out.u = fract(lon);
		uv_out.v = fract(t);

		break;

	case UV_PROJECTION_SPHERICAL:
				
		/* spherical is similar to cylindrical except we also unwrap the 'width' */
		switch(projection_axis)
		{
			case UV_AXIS_X:
				/* xyz to hp takes the point value and 'unwraps' the latitude and longitude that projects to that point */
				xyz_to_hp(uvpoint.z,uvpoint.x,-uvpoint.y,&lon,&lat);
				break;
			case UV_AXIS_Y:
				xyz_to_hp(uvpoint.x,-uvpoint.y,uvpoint.z,&lon,&lat);
				break;
			case UV_AXIS_Z:
				xyz_to_hp(-uvpoint.x,uvpoint.z,-uvpoint.y,&lon,&lat);
				break;
		}

		/* convert longitude and latitude to texture space coordinates, multiply by wrap height and width */
		lon = 1.0f - lon / M_TWO_PI;
		lat = 0.5f - lat / M_PI;

		if (wrap_w_count != 1.0f) lon = fract(lon) * wrap_w_count;
		if (wrap_h_count != 1.0f) lat = fract(lat) * wrap_h_count;

		uv_out.u = fract(lon);
		uv_out.v = fract(lat);
		
	break;

	case UV_PROJECTION_UV:
		// not handled here..
	break;

	default:	// else mapping cannot be handled here (i.e. cubic), this shouldn't have happened
			uv_out.u = 0;
			uv_out.v = 0;
		break;


	}
};
Example no. 7
static inline vec3 interpolatePath(std::vector<vec3> &path, float pos)
{
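	// assumes pos is in [0, 1); at pos == 1.0, i + 1 would index one past the last path vertex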
	float vertIdx = pos * (path.size() - 1);
	int i = int(floor(vertIdx));
	return mix(path[i], path[i + 1], fract(vertIdx));
}
Example no. 8
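// fract() wraps the (quarter-period-shifted) phase into [0, 1), so rawtriangle() is always evaluated over a single period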
KFR_SINTRIN T trianglenorm(T x)
{
    return rawtriangle(fract(x + 0.25));
}
Example no. 9
KFR_SINTRIN T sinenorm(T x)
{
    return rawsine(fract(x));
}
Example no. 10
void CubewallScene::_draw(float time)
{
	int textBitmapWidthL = min(textBitmapWidth, (int)(time*30.f));

	time -= SceneTime::outStorm2cubewall;

	float fadeIn = min(1.0f, 1.f+time*0.33f);
	bool fadeActive = (fadeIn < 1.0);

	if (!fadeActive)
	{
		fboOverlay->bind();
	}

	float fade2 = max(0.f, min(1.f, time));

	setBlendMode(NO_BLEND);
	setCullMode(NO_CULL);
	setDepthMode(NO_DEPTH);
	glClearDepth(1.0f);

	glClearColor(56.f/256.f * fade2, 60.f/256.f * fade2, 50.f/256.f * fade2, 1.0f);
	glClear((fadeActive ? 0 : GL_COLOR_BUFFER_BIT) | GL_DEPTH_BUFFER_BIT);

	float ctime = max(0.f, time);
	int cctime = clamp((int)(gMusic->getBeat() / 16) - int(SceneTime::cubewallStart / BEAT / 16), 0, 6); // 6 parts, then the end

	vec3 eye(-1.5*((cctime & 2) ? -1.f : 1.f), 0.5, (cctime & 1) ? 1. : (2. + 0.95*cos(ctime*0.2) + ctime*ctime*0.005));
	vec3 center(0, -0.1 + 0.2*sin(time*0.15) + 0.5*atan(time*0.1), 0);
	vec3 up(0, 1, 0);
	mat4 view = lookAt(eye, center, up);
	mat4 proj = perspective<float>(30.f + 10.f*(float)cos(time*0.1), demo.aspect, 0.001f, 100.f);
	mat4 projView = proj*view;

	floorClearShader->bind();

	setBlendMode(BLEND_ALPHA);
	floorClearShader->uniform("u_fade", fadeIn);
	floorClearShader->uniform("u_matrix", projView);
	drawArray(GL_TRIANGLE_FAN, floorVerts, NULL, NULL, NULL, NULL, sizeof(floorVerts) / (3 * sizeof(float)));

	floorClearShader->unbind();


	setDepthMode(DEPTH_FULL);
	setBlendMode(NO_BLEND);

	cubeShader->bind();

	cubeShader->uniform("u_eye", eye);
	cubeShader->uniform("u_matrix", projView);

	float cubeScaleFade = clamp((Duration() - time - 4.0f) / 1.0f, 0.0f, 1.0f);

	// from 179 onwards: the little cubes fly away
	float explosionTime = max(0.f, time - (179.f - SceneTime::cubewallStart));
	float explosionY = explosionTime*explosionTime;

	float cubeScale = 16.f / (float)textBitmapWidth;

	int cubeInstanceIndex = 0;

	for (int x = 0; x < textBitmapWidthL; x++)
	{
		for (int y = 0; y < textBitmapHeight; y++)
		{
			int cubeIndex = ((textBitmapHeight - 1) - y)*textBitmapWidth + x;
			int pixelPos = cubeIndex * 4;
			int   a = textBitmap[pixelPos + 3];
			if (a == 0) continue;
			float r = (textBitmap[pixelPos]   * a) * (1.f/(255.f*255.f));
			float g = (textBitmap[pixelPos+1] * a) * (1.f/(255.f*255.f));
			float b = (textBitmap[pixelPos+2] * a) * (1.f/(255.f*255.f));

			float random = randomValues[cubeIndex];
			float cubeShiftAmp = 1.f - fract(gMusic->getBeat());// fmod(time * 2 * 1.2f, 1.f);
			float cubeShift = (0.5f - random) * cubeShiftAmp * 2;

			vec4 color(r, g, b, 1.f);
			color *= (1. - (0.5*cubeShift) + cubeShift*random);
			float highlightRandom = randomValues[((textBitmapHeight - 1) - y)*textBitmapWidth + (x / 3 + (int)(gMusic->getBeat() / 4)) % (textBitmapWidth*textBitmapHeight)];
			float highlight = pow(highlightRandom, 10) * 10;
			color *= 1. + highlight;

			for (int mirror = 0; mirror < 2; mirror++)
			{
				vec3 pos(x*cubeScale + time*-0.65f + 1.5f, (y*cubeScale + 0.05f) * (mirror ? -1 : 1), cubeShift*0.5f*cubeScale);
				pos.y -= explosionY*(1.f+0.1f*random);
				pos.x += explosionTime*sin(10.f*random);

				cubePosScale[cubeInstanceIndex] = vec4(pos, cubeScaleFade * 0.4f*cubeScale);
				cubeCol     [cubeInstanceIndex] = color;
				cubeInstanceIndex++;

				if (cubeInstanceIndex == 510) // TODO: allocate a buffer for all values instead of relying on only 1024 constant registers
				{
					cubeShader->uniform("u_posScale", cubePosScale, cubeInstanceIndex);
					cubeShader->uniform("u_color", cubeCol, cubeInstanceIndex);
					drawArray(GL_TRIANGLE_STRIP, semicube, NULL, NULL, NULL, NULL, sizeof(semicube) / (3 * sizeof(float)), cubeInstanceIndex);
					cubeInstanceIndex = 0;
				}
			}
		}
	}

	cubeShader->uniform("u_posScale", cubePosScale, cubeInstanceIndex);
	cubeShader->uniform("u_color", cubeCol, cubeInstanceIndex);
	drawArray(GL_TRIANGLE_STRIP, semicube, NULL, NULL, NULL, NULL, sizeof(semicube) / (3 * sizeof(float)), cubeInstanceIndex);

	cubeShader->unbind();

	floorShader->bind();
	setBlendMode(BLEND_ALPHA);
	floorShader->uniform("u_eye", eye);
	floorShader->uniform("u_matrix", projView);
	floorShader->uniform("u_fade", fadeIn);
	drawArray(GL_TRIANGLE_FAN, floorVerts, NULL, NULL, NULL, NULL, sizeof(floorVerts) / (3 * sizeof(float)));
	floorShader->unbind();

	if (!fadeActive)
	{
		fboOverlay->unbind();

		setBlendMode(NO_BLEND);
		setCullMode(NO_CULL);
		setDepthMode(NO_DEPTH);

		kratzerShader->bind();
		kratzerShader->bindFbo("src", fboOverlay, 0);

		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);

		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_MIRRORED_REPEAT);
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_MIRRORED_REPEAT);
		kratzerShader->bindTexture("tex", kratzer, 1);
		kratzerShader->uniform("u_fade", fade2);
		drawFullscreenQuad();
		kratzerShader->unbind();
	}

	setDepthMode(NO_DEPTH);
	setBlendMode(BLEND_ALPHA);
	setCullMode(NO_CULL);

	if (fadeIn >= 1.0f)
	{
		float creditsStart = 3.f;
		float creditsDuration = (SceneTime::lazorsStart - SceneTime::cubewallStart) - creditsStart;
		float creditTime = (time - creditsStart - 1.f) * 3.f / creditsDuration;
		float pos = floor(creditTime);
		creditsShader->bind();
		creditsShader->bindTexture("tex", credits, 0);
		creditsShader->uniform("fade", 2 * (creditTime - pos));
		drawRect(vec2(-1, -1 + 0.1185), vec2(1, -1 + 2 * 0.1185), vec2(0, (1. / 3.)*(pos + 1)), vec2(1, (1. / 3.)*pos));
		creditsShader->unbind();
	}
}
Example no. 11
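    // returns OpenCL C source for a sector-based HSV-to-RGB conversion; OpenCL's fract(h, &temp) wraps the hue into [0, 1)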
    inline std::string getOpenCLHSV()
    {
#define STRINGIFY(A) #A

      return STRINGIFY(
    void HSVtoRGB(__local uchar4* color, float h, float s, float v) 
{
  float temp;
  h = fract(h, &temp);

  s = clamp(s, 0.0f, 1.0f);
  v = clamp(v, 0.0f, 1.0f);

  h = h * 6;

  unsigned int i = h;
  float f = h - i;
  float p = v * (1 - s);
  float q = v * (1 - s * f);
  float t = v * (1 - s * (1 - f));

  float r;
  float g;
  float b;

  switch(i) {
  case 0:	
    r = v;
    g = t;
    b = p;
    break;

  case 1:	
    r = q;
    g = v;
    b = p;
    break;

  case 2:
    r = p;
    g = v;
    b = t;
    break;

  case 3:
    r = p;
    g = q;
    b = v;
    break;

  case 4:
    r = t;
    g = p;
    b = v;
    break;

  case 5:
    r = v;
    g = p;
    b = q;
    break;
  }
  *color = (uchar4)(r*255,g*255,b*255,255);
});
Example no. 12
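	// component-wise fract for a 4-vector, built on the scalar overload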
	template<typename T> inline vec4<T> fract(vec4<T> const& x)
	{
		return vec4<T>(fract(x.x), fract(x.y), fract(x.z), fract(x.w));
	}
Example no. 13
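// 3D-DDA step: advance the ray along the axis whose voxel boundary is nearest, then shade using fract() of the hit point as the face UV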
void IterateVoxel(inout vec3 voxel, Ray ray, inout vec4 colorAccum)
{
	float maxX = 0.0;
	float maxY = 0.0;
	float maxZ = 0.0;
	
		
	if(ray.Direction.x != 0.0)
	{
		maxX = max((voxel.x - ray.Origin.x) / ray.Direction.x, (voxel.x + 1.0 - ray.Origin.x) / ray.Direction.x);
	}
	if(ray.Direction.y != 0.0)
	{
		maxY = max((voxel.y - ray.Origin.y) / ray.Direction.y, (voxel.y + 1.0 - ray.Origin.y) / ray.Direction.y);
	}
	if(ray.Direction.z != 0.0)
	{
		maxZ = max((voxel.z - ray.Origin.z) / ray.Direction.z, (voxel.z + 1.0 - ray.Origin.z) / ray.Direction.z);
	}

	vec2 hitPoint;
	float texture;
	if(maxX <= min(maxY, maxZ))
	{
		voxel.x += sign(ray.Direction.x);
		int block = GetVoxel(voxel);

		if(block != 0)
		{
			texture = (ray.Direction.x > 0) ? textureFaces[block*6 + 0] : textureFaces[block*6 + 1];
			hitPoint = fract(ray.Origin + ray.Direction * maxX).zy;
			colorAccum = texture2DArray(textures, vec3(1.0 - abs(hitPoint), texture));
			colorAccum.xyz *= 0.9;
		}
	}
	if(maxY <= min(maxX, maxZ))
	{
		voxel.y += sign(ray.Direction.y);
		int block = GetVoxel(voxel);

		if(block != 0)
		{
			texture = (ray.Direction.y > 0) ? textureFaces[block*6 + 3] : textureFaces[block*6 + 2];
			hitPoint = fract(ray.Origin + ray.Direction * maxY).xz;
			colorAccum = texture2DArray(textures, vec3(1.0 - abs(hitPoint), texture));
			colorAccum.xyz *= 1.0;
		}
	}
	if(maxZ <= min(maxX, maxY))
	{
		voxel.z += sign(ray.Direction.z);
		int block = GetVoxel(voxel);

		if(block != 0)
		{
			texture = (ray.Direction.z > 0) ? textureFaces[block*6 + 4] : textureFaces[block*6 + 5];
			hitPoint = fract(ray.Origin + ray.Direction * maxZ).xy;
			colorAccum = texture2DArray(textures, vec3(1.0 - abs(hitPoint), texture));
			colorAccum.xyz *= 0.8;
		}
	}
}
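	// parse the GL version (fract() extracts the minor digit) and extension strings, resolve extension entry points, and query implementation limits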
	void COpenGLExtensionHandler::initExtensions(bool stencilBuffer)
	{
		const Real ogl_ver = fast_atof(reinterpret_cast<const c8*>(glGetString(GL_VERSION)));
		Version = static_cast<UINT16>(floor32(ogl_ver) * 100 + round32(fract(ogl_ver)*10.0f));
		if (Version >= 102)
			Printer::log("OpenGL driver version is 1.2 or better.", LML_NORMAL);
		else
			Printer::log("OpenGL driver version is not 1.2 or better.", LML_CRITICAL);

		{
			const char* t = reinterpret_cast<const char*>(glGetString(GL_EXTENSIONS));
			size_t len = 0;
			c8 *str = 0;
			if (t)
			{
				len = strlen(t);
				str = new c8[len + 1];
			}
			c8* p = str;

			for (size_t i = 0; i<len; ++i)
			{
				str[i] = static_cast<char>(t[i]);

				if (str[i] == ' ')
				{
					str[i] = 0;
					for (UINT32 j = 0; j<SAPPHIRE_OpenGL_Feature_Count; ++j)
					{
						if (!strcmp(OpenGLFeatureStrings[j], p))
						{
							FeatureAvailable[j] = true;
							break;
						}
					}

					p = p + strlen(p) + 1;
				}
			}

			delete[] str;
		}

		MultiTextureExtension = FeatureAvailable[SAPPHIRE_ARB_multitexture];
		TextureCompressionExtension = FeatureAvailable[SAPPHIRE_ARB_texture_compression];
		StencilBuffer = stencilBuffer;

#ifdef SAPPHIRE_OPENGL_USE_EXTPOINTER
#if (SAPPHIRE_PLATFORM == SAPPHIRE_PLATFORM_WIN32)
#define SAPPHIRE_OGL_LOAD_EXTENSION(x) wglGetProcAddress(reinterpret_cast<const char*>(x))
#elif defined(_SAPPHIRE_COMPILE_WITH_SDL_DEVICE_) && !defined(_SAPPHIRE_COMPILE_WITH_X11_DEVICE_)
#define SAPPHIRE_OGL_LOAD_EXTENSION(x) SDL_GL_GetProcAddress(reinterpret_cast<const char*>(x))
#else
		// Accessing the correct function is quite complex
		// All libraries should support the ARB version, however
		// since GLX 1.4 the non-ARB version is the official one
		// So we have to check the runtime environment and
		// choose the proper symbol
		// In case you still have problems please enable the
		// next line by uncommenting it
		// #define _SAPPHIRE_GETPROCADDRESS_WORKAROUND_

#ifndef _SAPPHIRE_GETPROCADDRESS_WORKAROUND_
		__GLXextFuncPtr(*SAPPHIRE_OGL_LOAD_EXTENSION_FUNCP)(const GLubyte*) = 0;
#ifdef GLX_VERSION_1_4
		int major = 0, minor = 0;
		if (glXGetCurrentDisplay())
			glXQueryVersion(glXGetCurrentDisplay(), &major, &minor);
		if ((major>1) || (minor>3))
			SAPPHIRE_OGL_LOAD_EXTENSION_FUNCP = glXGetProcAddress;
		else
#endif
			SAPPHIRE_OGL_LOAD_EXTENSION_FUNCP = glXGetProcAddressARB;
#define SAPPHIRE_OGL_LOAD_EXTENSION(X) SAPPHIRE_OGL_LOAD_EXTENSION_FUNCP(reinterpret_cast<const GLubyte*>(X))
#else
#define SAPPHIRE_OGL_LOAD_EXTENSION(X) glXGetProcAddressARB(reinterpret_cast<const GLubyte*>(X))
#endif // workaround
#endif // Windows, SDL, or Linux

		// get multitexturing function pointers
		pGlActiveTextureARB = (PFNGLACTIVETEXTUREARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glActiveTextureARB");
		pGlClientActiveTextureARB = (PFNGLCLIENTACTIVETEXTUREARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glClientActiveTextureARB");

		// get fragment and vertex program function pointers
		pGlGenProgramsARB = (PFNGLGENPROGRAMSARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glGenProgramsARB");
		pGlGenProgramsNV = (PFNGLGENPROGRAMSNVPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glGenProgramsNV");
		pGlBindProgramARB = (PFNGLBINDPROGRAMARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glBindProgramARB");
		pGlBindProgramNV = (PFNGLBINDPROGRAMNVPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glBindProgramNV");
		pGlProgramStringARB = (PFNGLPROGRAMSTRINGARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glProgramStringARB");
		pGlLoadProgramNV = (PFNGLLOADPROGRAMNVPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glLoadProgramNV");
		pGlDeleteProgramsARB = (PFNGLDELETEPROGRAMSARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glDeleteProgramsARB");
		pGlDeleteProgramsNV = (PFNGLDELETEPROGRAMSNVPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glDeleteProgramsNV");
		pGlProgramLocalParameter4fvARB = (PFNGLPROGRAMLOCALPARAMETER4FVARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glProgramLocalParameter4fvARB");
		pGlCreateShaderObjectARB = (PFNGLCREATESHADEROBJECTARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glCreateShaderObjectARB");
		pGlCreateShader = (PFNGLCREATESHADERPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glCreateShader");
		pGlShaderSourceARB = (PFNGLSHADERSOURCEARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glShaderSourceARB");
		pGlShaderSource = (PFNGLSHADERSOURCEPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glShaderSource");
		pGlCompileShaderARB = (PFNGLCOMPILESHADERARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glCompileShaderARB");
		pGlCompileShader = (PFNGLCOMPILESHADERPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glCompileShader");
		pGlCreateProgramObjectARB = (PFNGLCREATEPROGRAMOBJECTARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glCreateProgramObjectARB");
		pGlCreateProgram = (PFNGLCREATEPROGRAMPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glCreateProgram");
		pGlAttachObjectARB = (PFNGLATTACHOBJECTARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glAttachObjectARB");
		pGlAttachShader = (PFNGLATTACHSHADERPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glAttachShader");
		pGlLinkProgramARB = (PFNGLLINKPROGRAMARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glLinkProgramARB");
		pGlLinkProgram = (PFNGLLINKPROGRAMPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glLinkProgram");
		pGlUseProgramObjectARB = (PFNGLUSEPROGRAMOBJECTARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glUseProgramObjectARB");
		pGlUseProgram = (PFNGLUSEPROGRAMPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glUseProgram");
		pGlDeleteObjectARB = (PFNGLDELETEOBJECTARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glDeleteObjectARB");
		pGlDeleteProgram = (PFNGLDELETEPROGRAMPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glDeleteProgram");
		pGlDeleteShader = (PFNGLDELETESHADERPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glDeleteShader");
		pGlGetAttachedShaders = (PFNGLGETATTACHEDSHADERSPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glGetAttachedShaders");
		pGlGetAttachedObjectsARB = (PFNGLGETATTACHEDOBJECTSARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glGetAttachedObjectsARB");
		pGlGetInfoLogARB = (PFNGLGETINFOLOGARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glGetInfoLogARB");
		pGlGetShaderInfoLog = (PFNGLGETSHADERINFOLOGPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glGetShaderInfoLog");
		pGlGetProgramInfoLog = (PFNGLGETPROGRAMINFOLOGPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glGetProgramInfoLog");
		pGlGetObjectParameterivARB = (PFNGLGETOBJECTPARAMETERIVARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glGetObjectParameterivARB");
		pGlGetShaderiv = (PFNGLGETSHADERIVPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glGetShaderiv");
		pGlGetProgramiv = (PFNGLGETPROGRAMIVPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glGetProgramiv");
		pGlGetUniformLocationARB = (PFNGLGETUNIFORMLOCATIONARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glGetUniformLocationARB");
		pGlGetUniformLocation = (PFNGLGETUNIFORMLOCATIONPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glGetUniformLocation");
		pGlUniform1fvARB = (PFNGLUNIFORM1FVARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glUniform1fvARB");
		pGlUniform2fvARB = (PFNGLUNIFORM2FVARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glUniform2fvARB");
		pGlUniform3fvARB = (PFNGLUNIFORM3FVARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glUniform3fvARB");
		pGlUniform4fvARB = (PFNGLUNIFORM4FVARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glUniform4fvARB");
		pGlUniform1ivARB = (PFNGLUNIFORM1IVARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glUniform1ivARB");
		pGlUniform2ivARB = (PFNGLUNIFORM2IVARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glUniform2ivARB");
		pGlUniform3ivARB = (PFNGLUNIFORM3IVARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glUniform3ivARB");
		pGlUniform4ivARB = (PFNGLUNIFORM4IVARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glUniform4ivARB");
		pGlUniformMatrix2fvARB = (PFNGLUNIFORMMATRIX2FVARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glUniformMatrix2fvARB");
		pGlUniformMatrix3fvARB = (PFNGLUNIFORMMATRIX3FVARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glUniformMatrix3fvARB");
		pGlUniformMatrix4fvARB = (PFNGLUNIFORMMATRIX4FVARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glUniformMatrix4fvARB");
		pGlGetActiveUniformARB = (PFNGLGETACTIVEUNIFORMARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glGetActiveUniformARB");
		pGlGetActiveUniform = (PFNGLGETACTIVEUNIFORMPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glGetActiveUniform");

		// get point parameter extension
		pGlPointParameterfARB = (PFNGLPOINTPARAMETERFARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glPointParameterfARB");
		pGlPointParameterfvARB = (PFNGLPOINTPARAMETERFVARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glPointParameterfvARB");

		// get stencil extension
		pGlStencilFuncSeparate = (PFNGLSTENCILFUNCSEPARATEPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glStencilFuncSeparate");
		pGlStencilOpSeparate = (PFNGLSTENCILOPSEPARATEPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glStencilOpSeparate");
		pGlStencilFuncSeparateATI = (PFNGLSTENCILFUNCSEPARATEATIPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glStencilFuncSeparateATI");
		pGlStencilOpSeparateATI = (PFNGLSTENCILOPSEPARATEATIPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glStencilOpSeparateATI");

		// compressed textures
		pGlCompressedTexImage2D = (PFNGLCOMPRESSEDTEXIMAGE2DPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glCompressedTexImage2D");

		// ARB FrameBufferObjects
		pGlBindFramebuffer = (PFNGLBINDFRAMEBUFFERPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glBindFramebuffer");
		pGlDeleteFramebuffers = (PFNGLDELETEFRAMEBUFFERSPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glDeleteFramebuffers");
		pGlGenFramebuffers = (PFNGLGENFRAMEBUFFERSPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glGenFramebuffers");
		pGlCheckFramebufferStatus = (PFNGLCHECKFRAMEBUFFERSTATUSPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glCheckFramebufferStatus");
		pGlFramebufferTexture2D = (PFNGLFRAMEBUFFERTEXTURE2DPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glFramebufferTexture2D");
		pGlBindRenderbuffer = (PFNGLBINDRENDERBUFFERPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glBindRenderbuffer");
		pGlDeleteRenderbuffers = (PFNGLDELETERENDERBUFFERSPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glDeleteRenderbuffers");
		pGlGenRenderbuffers = (PFNGLGENRENDERBUFFERSPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glGenRenderbuffers");
		pGlRenderbufferStorage = (PFNGLRENDERBUFFERSTORAGEPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glRenderbufferStorage");
		pGlFramebufferRenderbuffer = (PFNGLFRAMEBUFFERRENDERBUFFERPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glFramebufferRenderbuffer");
		pGlGenerateMipmap = (PFNGLGENERATEMIPMAPPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glGenerateMipmap");

		// EXT FrameBufferObjects
		pGlBindFramebufferEXT = (PFNGLBINDFRAMEBUFFEREXTPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glBindFramebufferEXT");
		pGlDeleteFramebuffersEXT = (PFNGLDELETEFRAMEBUFFERSEXTPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glDeleteFramebuffersEXT");
		pGlGenFramebuffersEXT = (PFNGLGENFRAMEBUFFERSEXTPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glGenFramebuffersEXT");
		pGlCheckFramebufferStatusEXT = (PFNGLCHECKFRAMEBUFFERSTATUSEXTPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glCheckFramebufferStatusEXT");
		pGlFramebufferTexture2DEXT = (PFNGLFRAMEBUFFERTEXTURE2DEXTPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glFramebufferTexture2DEXT");
		pGlBindRenderbufferEXT = (PFNGLBINDRENDERBUFFEREXTPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glBindRenderbufferEXT");
		pGlDeleteRenderbuffersEXT = (PFNGLDELETERENDERBUFFERSEXTPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glDeleteRenderbuffersEXT");
		pGlGenRenderbuffersEXT = (PFNGLGENRENDERBUFFERSEXTPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glGenRenderbuffersEXT");
		pGlRenderbufferStorageEXT = (PFNGLRENDERBUFFERSTORAGEEXTPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glRenderbufferStorageEXT");
		pGlFramebufferRenderbufferEXT = (PFNGLFRAMEBUFFERRENDERBUFFEREXTPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glFramebufferRenderbufferEXT");
		pGlGenerateMipmapEXT = (PFNGLGENERATEMIPMAPEXTPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glGenerateMipmapEXT");
		pGlDrawBuffersARB = (PFNGLDRAWBUFFERSARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glDrawBuffersARB");
		pGlDrawBuffersATI = (PFNGLDRAWBUFFERSATIPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glDrawBuffersATI");

		// get vertex buffer extension
		pGlGenBuffersARB = (PFNGLGENBUFFERSARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glGenBuffersARB");
		pGlBindBufferARB = (PFNGLBINDBUFFERARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glBindBufferARB");
		pGlBufferDataARB = (PFNGLBUFFERDATAARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glBufferDataARB");
		pGlDeleteBuffersARB = (PFNGLDELETEBUFFERSARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glDeleteBuffersARB");
		pGlBufferSubDataARB = (PFNGLBUFFERSUBDATAARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glBufferSubDataARB");
		pGlGetBufferSubDataARB = (PFNGLGETBUFFERSUBDATAARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glGetBufferSubDataARB");
		pGlMapBufferARB = (PFNGLMAPBUFFERARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glMapBufferARB");
		pGlUnmapBufferARB = (PFNGLUNMAPBUFFERARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glUnmapBufferARB");
		pGlIsBufferARB = (PFNGLISBUFFERARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glIsBufferARB");
		pGlGetBufferParameterivARB = (PFNGLGETBUFFERPARAMETERIVARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glGetBufferParameterivARB");
		pGlGetBufferPointervARB = (PFNGLGETBUFFERPOINTERVARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glGetBufferPointervARB");
		pGlProvokingVertexARB = (PFNGLPROVOKINGVERTEXPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glProvokingVertex");
		pGlProvokingVertexEXT = (PFNGLPROVOKINGVERTEXEXTPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glProvokingVertexEXT");
		pGlColorMaskIndexedEXT = (PFNGLCOLORMASKINDEXEDEXTPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glColorMaskIndexedEXT");
		pGlEnableIndexedEXT = (PFNGLENABLEINDEXEDEXTPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glEnableIndexedEXT");
		pGlDisableIndexedEXT = (PFNGLDISABLEINDEXEDEXTPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glDisableIndexedEXT");
		pGlBlendFuncIndexedAMD = (PFNGLBLENDFUNCINDEXEDAMDPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glBlendFuncIndexedAMD");
		pGlBlendFunciARB = (PFNGLBLENDFUNCIPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glBlendFunciARB");
		pGlBlendEquationIndexedAMD = (PFNGLBLENDEQUATIONINDEXEDAMDPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glBlendEquationIndexedAMD");
		pGlBlendEquationiARB = (PFNGLBLENDEQUATIONIPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glBlendEquationiARB");
		pGlProgramParameteriARB = (PFNGLPROGRAMPARAMETERIARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glProgramParameteriARB");
		pGlProgramParameteriEXT = (PFNGLPROGRAMPARAMETERIEXTPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glProgramParameteriEXT");

		// occlusion query
		pGlGenQueriesARB = (PFNGLGENQUERIESARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glGenQueriesARB");
		pGlDeleteQueriesARB = (PFNGLDELETEQUERIESARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glDeleteQueriesARB");
		pGlIsQueryARB = (PFNGLISQUERYARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glIsQueryARB");
		pGlBeginQueryARB = (PFNGLBEGINQUERYARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glBeginQueryARB");
		pGlEndQueryARB = (PFNGLENDQUERYARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glEndQueryARB");
		pGlGetQueryivARB = (PFNGLGETQUERYIVARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glGetQueryivARB");
		pGlGetQueryObjectivARB = (PFNGLGETQUERYOBJECTIVARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glGetQueryObjectivARB");
		pGlGetQueryObjectuivARB = (PFNGLGETQUERYOBJECTUIVARBPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glGetQueryObjectuivARB");
		pGlGenOcclusionQueriesNV = (PFNGLGENOCCLUSIONQUERIESNVPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glGenOcclusionQueriesNV");
		pGlDeleteOcclusionQueriesNV = (PFNGLDELETEOCCLUSIONQUERIESNVPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glDeleteOcclusionQueriesNV");
		pGlIsOcclusionQueryNV = (PFNGLISOCCLUSIONQUERYNVPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glIsOcclusionQueryNV");
		pGlBeginOcclusionQueryNV = (PFNGLBEGINOCCLUSIONQUERYNVPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glBeginOcclusionQueryNV");
		pGlEndOcclusionQueryNV = (PFNGLENDOCCLUSIONQUERYNVPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glEndOcclusionQueryNV");
		pGlGetOcclusionQueryivNV = (PFNGLGETOCCLUSIONQUERYIVNVPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glGetOcclusionQueryivNV");
		pGlGetOcclusionQueryuivNV = (PFNGLGETOCCLUSIONQUERYUIVNVPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glGetOcclusionQueryuivNV");

		// blend equation
		pGlBlendEquationEXT = (PFNGLBLENDEQUATIONEXTPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glBlendEquationEXT");
		pGlBlendEquation = (PFNGLBLENDEQUATIONPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glBlendEquation");

		// get vsync extension
#if defined(WGL_EXT_swap_control) && !defined(_SAPPHIRE_COMPILE_WITH_SDL_DEVICE_)
		pWglSwapIntervalEXT = (PFNWGLSWAPINTERVALEXTPROC)SAPPHIRE_OGL_LOAD_EXTENSION("wglSwapIntervalEXT");
#endif
#if defined(GLX_SGI_swap_control) && !defined(_SAPPHIRE_COMPILE_WITH_SDL_DEVICE_)
		pGlxSwapIntervalSGI = (PFNGLXSWAPINTERVALSGIPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glXSwapIntervalSGI");
#endif
#if defined(GLX_EXT_swap_control) && !defined(_SAPPHIRE_COMPILE_WITH_SDL_DEVICE_)
		pGlxSwapIntervalEXT = (PFNGLXSWAPINTERVALEXTPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glXSwapIntervalEXT");
#endif
#if defined(GLX_MESA_swap_control) && !defined(_SAPPHIRE_COMPILE_WITH_SDL_DEVICE_)
		pGlxSwapIntervalMESA = (PFNGLXSWAPINTERVALMESAPROC)SAPPHIRE_OGL_LOAD_EXTENSION("glXSwapIntervalMESA");
#endif
#endif // use extension pointer

		GLint num = 0;
		// set some properties
#if defined(GL_ARB_multitexture) || defined(GL_VERSION_1_3)
		if (Version>102 || FeatureAvailable[SAPPHIRE_ARB_multitexture])
		{
#if defined(GL_MAX_TEXTURE_UNITS)
			glGetIntegerv(GL_MAX_TEXTURE_UNITS, &num);
#elif defined(GL_MAX_TEXTURE_UNITS_ARB)
			glGetIntegerv(GL_MAX_TEXTURE_UNITS_ARB, &num);
#endif
			MaxSupportedTextures = static_cast<UINT8>(num);
		}
#endif
#if defined(GL_ARB_vertex_shader) || defined(GL_VERSION_2_0)
		if (Version >= 200 || FeatureAvailable[SAPPHIRE_ARB_vertex_shader])
		{
			num = 0;
#if defined(GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS)
			glGetIntegerv(GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS, &num);
#elif defined(GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS_ARB)
			glGetIntegerv(GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS_ARB, &num);
#endif
			MaxSupportedTextures = Math::_max<UINT8>(MaxSupportedTextures, static_cast<UINT8>(num));
		}
#endif
		glGetIntegerv(GL_MAX_LIGHTS, &num);
		MaxLights = static_cast<UINT8>(num);
#ifdef GL_EXT_texture_filter_anisotropic
		if (FeatureAvailable[SAPPHIRE_EXT_texture_filter_anisotropic])
		{
			glGetIntegerv(GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT, &num);
			MaxAnisotropy = static_cast<UINT8>(num);
		}
#endif
#ifdef GL_VERSION_1_2
		if (Version>101)
		{
			glGetIntegerv(GL_MAX_ELEMENTS_INDICES, &num);
			MaxIndices = num;
		}
#endif
		glGetIntegerv(GL_MAX_TEXTURE_SIZE, &num);
		MaxTextureSize = static_cast<UINT32>(num);
		if (queryFeature(EVDF_GEOMETRY_SHADER))
		{
#if defined(GL_ARB_geometry_shader4) || defined(GL_EXT_geometry_shader4) || defined(GL_NV_geometry_shader4)
			glGetIntegerv(GL_MAX_GEOMETRY_OUTPUT_VERTICES_EXT, &num);
			MaxGeometryVerticesOut = static_cast<UINT32>(num);
#elif defined(GL_NV_geometry_program4)
			extGlGetProgramiv(GEOMETRY_PROGRAM_NV, GL_MAX_PROGRAM_OUTPUT_VERTICES_NV, &num);
			MaxGeometryVerticesOut = static_cast<UINT32>(num);
#endif
		}
#ifdef GL_EXT_texture_lod_bias
		if (FeatureAvailable[SAPPHIRE_EXT_texture_lod_bias])
			glGetFloatv(GL_MAX_TEXTURE_LOD_BIAS_EXT, &MaxTextureLODBias);
#endif
		glGetIntegerv(GL_MAX_CLIP_PLANES, &num);
		MaxUserClipPlanes = static_cast<UINT8>(num);
		glGetIntegerv(GL_AUX_BUFFERS, &num);
		MaxAuxBuffers = static_cast<UINT8>(num);
#ifdef GL_ARB_draw_buffers
		if (FeatureAvailable[SAPPHIRE_ARB_draw_buffers])
		{
			glGetIntegerv(GL_MAX_DRAW_BUFFERS_ARB, &num);
			MaxMultipleRenderTargets = static_cast<UINT8>(num);
		}
#endif
#if defined(GL_ATI_draw_buffers)
#ifdef GL_ARB_draw_buffers
		else
#endif
			if (FeatureAvailable[SAPPHIRE_ATI_draw_buffers])
			{
				glGetIntegerv(GL_MAX_DRAW_BUFFERS_ATI, &num);
				MaxMultipleRenderTargets = static_cast<UINT8>(num);
			}
#endif
		glGetFloatv(GL_ALIASED_LINE_WIDTH_RANGE, DimAliasedLine);
		glGetFloatv(GL_ALIASED_POINT_SIZE_RANGE, DimAliasedPoint);
		glGetFloatv(GL_SMOOTH_LINE_WIDTH_RANGE, DimSmoothedLine);
		glGetFloatv(GL_SMOOTH_POINT_SIZE_RANGE, DimSmoothedPoint);
#if defined(GL_ARB_shading_language_100) || defined (GL_VERSION_2_0)
		if (FeatureAvailable[SAPPHIRE_ARB_shading_language_100] || Version >= 200)
		{
			glGetError(); // clean error buffer
#ifdef GL_SHADING_LANGUAGE_VERSION
			const GLubyte* shaderVersion = glGetString(GL_SHADING_LANGUAGE_VERSION);
#else
			const GLubyte* shaderVersion = glGetString(GL_SHADING_LANGUAGE_VERSION_ARB);
#endif
			if (glGetError() == GL_INVALID_ENUM)
				ShaderLanguageVersion = 100;
			else
			{
				const Real sl_ver = fast_atof(reinterpret_cast<const c8*>(shaderVersion));
				ShaderLanguageVersion = static_cast<UINT16>(floor32(sl_ver) * 100 + round32(fract(sl_ver)*10.0f));
			}
		}
#endif

#ifdef SAPPHIRE_OPENGL_USE_EXTPOINTER
		if (!pGlActiveTextureARB || !pGlClientActiveTextureARB)
		{
			MultiTextureExtension = false;
			Printer::log("Failed to load OpenGL's multitexture extension, proceeding without.", LML_CRITICAL);
		}
		else
#endif
			MaxTextureUnits = Math::_min(MaxSupportedTextures, static_cast<UINT8>(MATERIAL_MAX_TEXTURES));
		if (MaxTextureUnits < 2)
		{
			MultiTextureExtension = false;
			Printer::log("Warning: OpenGL device only has one texture unit. Disabling multitexturing.", LML_CRITICAL);
		}

#ifdef GL_ARB_occlusion_query
		if (FeatureAvailable[SAPPHIRE_ARB_occlusion_query])
		{
			extGlGetQueryiv(GL_SAMPLES_PASSED_ARB, GL_QUERY_COUNTER_BITS_ARB,
				&num);
			OcclusionQuerySupport = (num>0);
		}
		else
#endif
#ifdef GL_NV_occlusion_query
			if (FeatureAvailable[SAPPHIRE_NV_occlusion_query])
			{
				glGetIntegerv(GL_PIXEL_COUNTER_BITS_NV, &num);
				OcclusionQuerySupport = (num>0);
			}
			else
#endif
				OcclusionQuerySupport = false;

#ifdef _DEBUG
		if (FeatureAvailable[SAPPHIRE_NVX_gpu_memory_info])
		{
			// undocumented flags, so use the RAW values
			GLint val = 0;
			glGetIntegerv(0x9047, &val);
			StringUtil::StrStreamType ssDedicated;
			ssDedicated << val;
			Printer::log("Dedicated video memory (kB)", ssDedicated.str());
			glGetIntegerv(0x9048, &val);
			StringUtil::StrStreamType ssTotal;
			ssTotal << val;
			Printer::log("Total video memory (kB)", ssTotal.str());
			glGetIntegerv(0x9049, &val);
			StringUtil::StrStreamType ssAvailable;
			ssAvailable << val;
			Printer::log("Available video memory (kB)", ssAvailable.str());
		}
#ifdef GL_ATI_meminfo
		if (FeatureAvailable[SAPPHIRE_ATI_meminfo])
		{
			GLint val[4];
			// GL_ATI_meminfo returns four values; val[0] is the free memory in kB
			glGetIntegerv(GL_TEXTURE_FREE_MEMORY_ATI, val);
			StringUtil::StrStreamType ssTex;
			ssTex << val[0];
			Printer::log("Free texture memory (kB)", ssTex.str());
			glGetIntegerv(GL_VBO_FREE_MEMORY_ATI, val);
			StringUtil::StrStreamType ssVbo;
			ssVbo << val[0];
			Printer::log("Free VBO memory (kB)", ssVbo.str());
			glGetIntegerv(GL_RENDERBUFFER_FREE_MEMORY_ATI, val);
			StringUtil::StrStreamType ssRb;
			ssRb << val[0];
			Printer::log("Free render buffer memory (kB)", ssRb.str());
		}
#endif
#endif
	}
void main(void)\
{\
vec2 n=vec2(fract(sin(dot(o.xy+p[0][0],vec2(12.9898,78.233)))*43758.5453));\
gl_FragColor=texture2D(t,.5*o.xy+.5+.0007*n)+n.x*.02;\
}";
Example no. 16
KFR_SINTRIN T squarenorm(T x)
{
    return rawsquare(fract(x));
}
Example no. 17
void main()
{
  //Calculate the ray direction using viewport information
  vec3 rayDirection;
  rayDirection.x = 2.0 * gl_FragCoord.x / WindowSize.x - 1.0;
  rayDirection.y = 2.0 * gl_FragCoord.y / WindowSize.y - 1.0;
  rayDirection.y *= WindowSize.y / WindowSize.x;
  rayDirection.z = -FocalLength;
  rayDirection = (vec4(rayDirection, 0.0) * ViewMatrix).xyz;
  rayDirection = normalize(rayDirection);

  //Cube ray intersection test
  vec3 invR = 1.0 / rayDirection;
  vec3 boxMin = vec3(-1.0,-1.0,-1.0);
  vec3 boxMax = vec3( 1.0, 1.0, 1.0);
  vec3 tbot = invR * (boxMin - RayOrigin);
  vec3 ttop = invR * (boxMax - RayOrigin);
  
  //Now sort all elements of tbot and ttop to find the two min and max elements
  vec3 tmin = min(ttop, tbot); //Closest planes
  vec2 t = max(tmin.xx, tmin.yz); //Out of the closest planes, find the last to be entered (collision point)
  float tnear = max(t.x, t.y);//...

  //If the viewpoint is penetrating the volume, make sure to only cast the ray
  //from the eye position, not behind it
  if (tnear < 0.0) tnear = 0.0;

  //Now work out when the ray will leave the volume
  vec3 tmax = max(ttop, tbot); //Distant planes
  t = min(tmax.xx, tmax.yz);//Find the first plane to be exited
  float tfar = min(t.x, t.y);//...

  //Check what the screen depth is to make sure we don't sample the
  //volume past any standard GL objects
  float bufferDepth = texture2D(DepthTexture, gl_FragCoord.xy / WindowSize.xy).r;
  float depth = recalcZCoord(bufferDepth);
  if (tfar > depth) tfar = depth;
  
  //This value is used to ensure that changing the step size does not
  //change the visualization as the alphas are renormalized using it.
  //For more information see the loop below where it is used
  const float baseStepSize = 0.01;

  //We need to calculate the ray's starting position. We add a random
  //fraction of the stepsize to the original starting point to dither
  //the output
  float random = DitherRay * fract(sin(gl_FragCoord.x * 12.9898 + gl_FragCoord.y * 78.233) * 43758.5453); 
  vec3 rayPos = RayOrigin + rayDirection * (tnear + StepSize * random);

  //The color accumulation variable
  vec4 color = vec4(0.0, 0.0, 0.0, 0.0);
  
  //We store the last valid normal, incase we hit a homogeneous region
  //and need to reuse it, but at the start we have no normal
  vec3 lastnorm = vec3(0,0,0); 

  for (float length = tfar - tnear; length > 0.0; 
       length -= StepSize, rayPos.xyz += rayDirection * StepSize)
    {
      //Grab the volume sample
      vec4 sample = texture3D(DataTexture, (rayPos + 1.0) * 0.5);

      //Sort out the normal data
      vec3 norm = sample.xyz * 2.0 - 1.0;
      //Test if we've got a bad normal and need to reuse the old one
      if (dot(norm,norm) < 0.5) norm = lastnorm; 
      //Store the current normal
      lastnorm = norm; 

      //Calculate the color of the voxel using the transfer function
      vec4 src = texture1D(TransferTexture, sample.a);
      
      //This corrects the transparency change caused by changing step
      //size. All alphas are defined for a certain base step size
      src.a = 1.0 - pow((1.0 - src.a), StepSize / baseStepSize);

      ////////////Lighting calculations
      //We perform all the calculations in the model (untransformed)
      //space.
      vec3 lightDir = normalize(LightPosition - rayPos);
      float lightNormDot = dot(normalize(norm), lightDir);
      
      //Diffuse lighting
      float diffTerm =  max(0.5 * lightNormDot + 0.5, 0.5);
      //Quadratic falloff of the diffusive term
      diffTerm *= diffTerm;

      //We either use diffusive lighting plus an ambient, or if its
      //disabled (DiffusiveLighting = 0), we just use the original
      //color.
      vec3 ambient = vec3(0.1,0.1,0.1);
      src.rgb *= DiffusiveLighting * (diffTerm + ambient) + (1.0 - DiffusiveLighting);
      
      //Specular lighting term
      //This is enabled if (SpecularLighting == 1)
      vec3 ReflectedRay = reflect(lightDir, norm);
      src.rgb += SpecularLighting
	* float(lightNormDot > 0.0) //Test to ensure that specular is only
	//applied to front facing voxels
	* vec3(1.0,1.0,1.0) * pow(max(dot(ReflectedRay, rayDirection), 0.0), 96.0);
      
      ///////////Front to back blending
      src.rgb *= src.a;
      color = (1.0 - color.a) * src + color;

      //We only accumulate up to 0.95 alpha (the front to back
      //blending never reaches 1). 
      if (color.a >= 0.95)
	{
	  //We have to renormalize the color by the alpha value (see
	  //below)
	  color.rgb /= color.a;
	  //Set the alpha to one to make sure the pixel is not transparent
	  color.a = 1.0;
	  break;
	}
    }
  /*We must renormalize the color by the alpha value. For example, if
  our ray only hits just one white voxel with a alpha of 0.5, we will have

  src.rgb = vec4(1,1,1,0.5)

  src.rgb *= src.a; 
  //which gives, src.rgb = 0.5 * src.rgb = vec4(0.5,0.5,0.5,0.5)

  color = (1.0 - color.a) * src + color;
  //which gives, color = (1.0 - 0) * vec4(0.5,0.5,0.5,0.5) + vec4(0,0,0,0) = vec4(0.5,0.5,0.5,0.5)

  So the final color of the ray is half way between white and black, but the voxel it hit was white!
  The solution is to divide by the alpha, as this is the "amount of color" added to color.
  */
  color.rgb /= float(color.a == 0.0) + color.a;
  gl_FragColor = color;
});
Example no. 18
KFR_SINTRIN T sawtoothnorm(T x)
{
    return rawsawtooth(fract(x));
}
Example no. 19
void UVMapper::apply(Object &obj, Material *materialRef, unsigned short layerRef)
{
	float u,v,s,t;
	Vector normal;

	map<unsigned short, set<cvrIndex>, ltushort>::iterator h;
	/* i -- used to iterate through the set of faces which belong to the given material */
	set<cvrIndex>::iterator i;
	/* j -- used to step through the points of the face which is referenced by i */
	vector<cvrIndex>::iterator j;
	unsigned int seg;
	
	obj.buildRefList();
	if (obj.mat_reflist[materialRef].empty()) return;

	/* calculate the uv for the points referenced by this face's pointref vector */
	switch (projection_mode)
	{
	case UV_PROJECTION_CUBIC: /* cubic projection needs to know the surface normal */

		for (seg = 0; seg < obj.mat_reflist[materialRef].size(); seg++)
		{
			for (h = obj.mat_reflist[materialRef][seg].begin(); h != obj.mat_reflist[materialRef][seg].end(); h++)
			{	
				for (i = (*h).second.begin(); i != (*h).second.end(); i++)
				{
					/* check and see if the uv's are allocated, if not, allocate them */
					if ((unsigned short)obj.faces[(*i)]->uv.size() < layerRef+1) obj.faces[(*i)]->uv.resize(layerRef+1);
					if (obj.faces[(*i)]->uv[layerRef].size() != obj.faces[(*i)]->pointref.size())	obj.faces[(*i)]->uv[layerRef].resize(obj.faces[(*i)]->pointref.size());

					cvrIndex pt_count = 0;	/* used to keep track of which UV we're calculating (since pointrefs aren't necessarily in order) */
					for (j = obj.faces[(*i)]->pointref.begin(); j != obj.faces[(*i)]->pointref.end(); j++)
					{
						XYZ uvpoint = *obj.points[*j];

						uvpoint -= center;	/* shift the point by the centerpoint */
						
						/* if we have a rotation, rotate the point to match it */
						if (rotation.x || rotation.y || rotation.z)	
						{
							XYZ npoint = uvpoint;
							rotatexyz(rotation,uvpoint,npoint);
							uvpoint = npoint;
						}

						/* first we need to check what the most 'dominant' direction of this face is (axis) */
						
						float nx, ny, nz;
						
						nx = fabs(obj.faces[(*i)]->face_normal.x);
						ny = fabs(obj.faces[(*i)]->face_normal.y);
						nz = fabs(obj.faces[(*i)]->face_normal.z);
						
						/* x portion of vector is dominant, we're mapping in the Y/Z plane */
						if (nx >= ny && nx >= nz)
						{
							/* we use a .5 offset because texture coordinates range from 0->1, so to center it we need to offset by .5 */
							s = uvpoint.z / scale.z + 0.5f;	/* account for scale here */
							t = uvpoint.y / scale.y + 0.5f;
							u = fract(s);
							v = fract(t);
						}

						/* y portion of vector is dominant, we're mapping in the X/Z plane */
						if (ny >= nx && ny >= nz)
						{
							
							s = -uvpoint.x / scale.x + 0.5f;
							t = uvpoint.z / scale.z + 0.5f;
							u = fract(s);
							v = fract(t);
						}

						/* z portion of vector is dominant, we're mapping in the X/Y plane */
						if (nz >= nx && nz >= ny)
						{
							s = -uvpoint.x / scale.x + 0.5f;
							t = uvpoint.y / scale.y + 0.5f;
							u = fract(s);
							v = fract(t);
						}

						if (obj.faces[(*i)]->face_normal.x > 0) { u = -u;  }
						if (obj.faces[(*i)]->face_normal.y < 0) { u = -u;  }
						if (obj.faces[(*i)]->face_normal.z > 0) { u = -u;  }
						
						obj.faces[(*i)]->uv[layerRef][pt_count].u = u;
						obj.faces[(*i)]->uv[layerRef][pt_count].v = v;
						pt_count++; /* next point UV please */
					}
				}
			}
		}
		break;

	default:	/* simple XYZ to UV calc will suffice for non-cubic mapping */

		for (seg = 0; seg < obj.mat_reflist[materialRef].size(); seg++)
		{
			for (h = obj.mat_reflist[materialRef][seg].begin(); h != obj.mat_reflist[materialRef][seg].end(); h++)
			{	
				for (i = (*h).second.begin(); i != (*h).second.end(); i++)
				{
					if ((unsigned short)obj.faces[(*i)]->uv.size() < layerRef+1) obj.faces[(*i)]->uv.resize(layerRef+1);
					if (obj.faces[(*i)]->uv[layerRef].size() != obj.faces[(*i)]->pointref.size())	obj.faces[(*i)]->uv[layerRef].resize(obj.faces[(*i)]->pointref.size());
			
					cvrIndex pt_count = 0;	
					for (j = obj.faces[(*i)]->pointref.begin(); j != obj.faces[(*i)]->pointref.end(); j++)
					{
						getUV(*obj.points[*j], obj.faces[*i]->uv[layerRef][pt_count++]);	/* map it, see getUV */
					}
				}
			}
		}
		break;
	}
};
Example no. 20
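// fract(x + 0.5) wraps the phase; rescaling [0, 1) to [-1, 1) puts the zero crossing at integer x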
KFR_SINTRIN T isawtoothnorm(T x)
{
    return T(-1) + 2 * fract(x + 0.5);
}
Example no. 21
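		// trilinear/mipmap filtering: fetch the two nearest mip levels bilinearly and blend them by the fractional LOD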
		static texel_type call(texture_type const & Texture, fetch_type Fetch, samplecoord_type const & SampleCoordWrap, size_type Layer, size_type Face, interpolate_type Level, texel_type const & BorderColor)
		{
			texel_type const MinTexel = linear<Dimension, texture_type, interpolate_type, samplecoord_type, fetch_type, texel_type, is_float, support_border>::call(Texture, Fetch, SampleCoordWrap, Layer, Face, floor(Level), BorderColor);
			texel_type const MaxTexel = linear<Dimension, texture_type, interpolate_type, samplecoord_type, fetch_type, texel_type, is_float, support_border>::call(Texture, Fetch, SampleCoordWrap, Layer, Face, ceil(Level), BorderColor);
			return mix(MinTexel, MaxTexel, fract(Level));
		}
Example no. 22
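// branch-free HSV to RGB: fract() builds the periodic hue ramp, which is clamped and mixed towards white by saturation, then scaled by value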
vec3 hsv2rgb(vec3 c)
{
    vec4 K = vec4(1.0, 2.0 / 3.0, 1.0 / 3.0, 3.0);
    vec3 p = abs(fract(c.xxx + K.xyz) * 6.0 - K.www);
    return c.z * mix(K.xxx, clamp(p - K.xxx, 0.0, 1.0), c.y);
}
Example no. 23
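/* Xiaolin Wu's anti-aliased line drawing: fract() of the ideal coordinate splits the coverage between the two pixels straddling the line */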
void wuline(SDL_Surface *surface,int x1,int y1,int x2,int y2, Uint32 pixel)
{
    double dx,dy,xend,yend,xgap,ygap,xpxl1,ypxl1,xpxl2,ypxl2,intery,interx,gradient, f;
    Uint8 alph;
    dx = x2-x1;
    dy = y2-y1;
    int ax,ay,a,b;
    Uint32 pixnoalph=(pixel&0x00FFFFFF);
    if(fabs(dx) > fabs(dy))
    {
        if(x2<x1)
        {
            ax=x1;
            x1= x2;
            x2 = ax;
            ay = y1;
            y1 = y2;
            y2 = ay;
        }
        gradient=dy/dx;
        xend=ceil(x1);
        yend=y1+gradient*(xend-x1);
        xgap = 1.0-fract(x1 + 0.5);
        xpxl1 = xend;
        ypxl1 = floor(yend);
        f=(1.0-fract(yend))*xgap; /* endpoint coverage: (1 - fract) * gap for the lower pixel */
        alph=lerp(rgba(get_dot(surface,xpxl1,ypxl1)),rgba(pixel),f);
        dot(surface,xpxl1,ypxl1,pixnoalph|(alph<<24));
        f = fract(yend)*xgap;
        alph=lerp(rgba(get_dot(surface,xpxl1,ypxl1+1)),rgba(pixel),f);
        dot(surface,xpxl1,ypxl1+1,pixnoalph|(alph<<24));
        intery = yend + gradient;
        xend = ceil(x2);
        yend = y2 + gradient * (xend-x2);
        xgap = 1.0 - fract(x2 + 0.5);
        xpxl2 = xend;
        ypxl2 = (int)(yend);
        f = (1.0-fract(yend))*xgap;
        alph=lerp(rgba(get_dot(surface,xpxl2,ypxl2)),rgba(pixel),f);
        dot(surface,xpxl2,ypxl2,pixnoalph|(alph<<24));
        f = fract(yend)*xgap;
        alph=lerp(rgba(get_dot(surface,xpxl2,ypxl2+1)),rgba(pixel),f);
        dot(surface,xpxl2,ypxl2+1,pixnoalph|(alph<<24));


        a = xpxl1+1;
        b = xpxl2-1;
        for(int x=a;x<=b;x++)
        {

            f = 1.0-fract(intery);
            alph=lerp(rgba(get_dot(surface,x,intery)),rgba(pixel),f);
            dot(surface,x,intery,pixnoalph|(alph<<24));


            f = fract(intery);
            alph=lerp(rgba(get_dot(surface,x,intery+1)),rgba(pixel),f);
            dot(surface,x,intery+1,pixnoalph|(alph<<24));

            intery = intery + gradient;

        }

    }else{
        if(y2 < y1)
        {

            ax = x1;
            x1 = x2;
            x2 = ax;
            ay = y1;
            y1 = y2;
            y2 = ay;
        }

        gradient = dx / dy;

        yend = ceil(y1);
        xend = x1 + gradient * (yend-y1);
        ygap = 1.0 - fract(y1 + 0.5);
        xpxl1 = (int)(xend);
        ypxl1 = yend;
        f = (1.0-fract(xend))*ygap;
        alph=lerp(rgba(get_dot(surface,xpxl1, ypxl1)),rgba(pixel),f);
        dot(surface,xpxl1, ypxl1,pixnoalph|(alph<<24));

        f = fract(xend)*ygap;
        alph=lerp(rgba(get_dot(surface,xpxl1, ypxl1+1)),rgba(pixel),f);
        dot(surface,xpxl1, ypxl1+1,pixnoalph|(alph<<24));

        interx = xend + gradient;

        yend = ceil(y2);
        xend = x2 + gradient * (yend-y2);
        ygap = fract(y2 + 0.5);
        xpxl2 = (int)(xend);
        ypxl2 = yend;
        f = (1.0-fract(xend))*ygap;
        alph=lerp(rgba(get_dot(surface,xpxl2, ypxl2)),rgba(pixel),f);
        dot(surface,xpxl2, ypxl2,pixnoalph|(alph<<24));

        f = fract(xend)*ygap;
        alph=lerp(rgba(get_dot(surface,xpxl2, ypxl2+1)),rgba(pixel),f);
        dot(surface,xpxl2, ypxl2+1,pixnoalph|(alph<<24));


        a = ypxl1+1;
        b = ypxl2-1;
        for(int y=a;y<=b;y++)
        {

            f = 1.0-fract(interx);
            alph=lerp(rgba(get_dot(surface,interx,y)),rgba(pixel),f);
            dot(surface,interx,y,pixnoalph|(alph<<24));

            f = fract(interx);
            alph=lerp(rgba(get_dot(surface,interx+1,y)),rgba(pixel),f);
            dot(surface,interx+1,y,pixnoalph|(alph<<24));
            interx = interx + gradient;
        }
    }
}
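The function above relies on several helpers that are not shown in this example. The following is a minimal sketch of what they could look like, assuming a 32-bit surface with the alpha channel in the top byte; the names match the calls above, but the bodies are illustrative only and in a real file would be declared before wuline.

#include <math.h>
#include <SDL.h>

/* Fractional part of x (assumed helper). */
static double fract(double x) { return x - floor(x); }

/* Linear blend of two 8-bit values by f in [0,1] (assumed helper). */
static Uint8 lerp(Uint8 a, Uint8 b, double f) { return (Uint8)(a + (b - a) * f + 0.5); }

/* Alpha channel of a pixel, assuming alpha lives in the top byte. */
static Uint8 rgba(Uint32 pixel) { return (Uint8)(pixel >> 24); }

/* Bounds-checked pixel write on a 32-bit surface (assumes it is already locked). */
static void dot(SDL_Surface *s, int x, int y, Uint32 pixel)
{
    if (x < 0 || y < 0 || x >= s->w || y >= s->h) return;
    ((Uint32 *)s->pixels)[y * (s->pitch / 4) + x] = pixel;
}

/* Bounds-checked pixel read; returns 0 outside the surface. */
static Uint32 get_dot(SDL_Surface *s, int x, int y)
{
    if (x < 0 || y < 0 || x >= s->w || y >= s->h) return 0;
    return ((Uint32 *)s->pixels)[y * (s->pitch / 4) + x];
}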
Example no. 24
void main() {
	vec3 outgoingLight = vec3( 0.0 );
	vec4 diffuseColor = vec4( diffuse, opacity );
	vec3 totalAmbientLight = vec3( 1.0 );
	vec3 shadowMask = vec3( 1.0 );
#if defined(USE_LOGDEPTHBUF) && defined(USE_LOGDEPTHBUF_EXT)
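	// Logarithmic depth buffer: write the fragment depth explicitly via the EXT_frag_depth extension.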

	gl_FragDepthEXT = log2(vFragDepth) * logDepthBufFC * 0.5;

#endif
#ifdef USE_MAP

	vec4 texelColor = texture2D( map, vUv );

	texelColor.xyz = inputToLinear( texelColor.xyz );

	diffuseColor *= texelColor;

#endif

#ifdef USE_COLOR

	diffuseColor.rgb *= vColor;

#endif
#ifdef USE_ALPHAMAP

	diffuseColor.a *= texture2D( alphaMap, vUv ).g;

#endif

#ifdef ALPHATEST

	if ( diffuseColor.a < ALPHATEST ) discard;

#endif

float specularStrength;

#ifdef USE_SPECULARMAP

	vec4 texelSpecular = texture2D( specularMap, vUv );
	specularStrength = texelSpecular.r;

#else

	specularStrength = 1.0;

#endif
#ifdef USE_AOMAP

	totalAmbientLight *= ( texture2D( aoMap, vUv2 ).r - 1.0 ) * aoMapIntensity + 1.0;

#endif

#ifdef USE_SHADOWMAP
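	// Accumulate the attenuation from each shadow-casting light into shadowMask.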

	for ( int i = 0; i < MAX_SHADOWS; i ++ ) {

		float texelSizeY =  1.0 / shadowMapSize[ i ].y;

		float shadow = 0.0;

#if defined( POINT_LIGHT_SHADOWS )

		bool isPointLight = shadowDarkness[ i ] < 0.0;
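		// A negative shadowDarkness is used here to flag a point-light (cube) shadow map.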

		if ( isPointLight ) {

			float realShadowDarkness = abs( shadowDarkness[ i ] );

			vec3 lightToPosition = vShadowCoord[ i ].xyz;

	#if defined( SHADOWMAP_TYPE_PCF ) || defined( SHADOWMAP_TYPE_PCF_SOFT )

			vec3 bd3D = normalize( lightToPosition );
			float dp = length( lightToPosition );

			adjustShadowValue1K( dp, texture2D( shadowMap[ i ], cubeToUV( bd3D, texelSizeY ) ), shadowBias[ i ], shadow );


	#if defined( SHADOWMAP_TYPE_PCF )
			const float Dr = 1.25;
	#elif defined( SHADOWMAP_TYPE_PCF_SOFT )
			const float Dr = 2.25;
	#endif

			float os = Dr *  2.0 * texelSizeY;

			const vec3 Gsd = vec3( - 1, 0, 1 );
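			// 20 offset taps around bd3D; with the centre tap above, that is 21 samples, hence the 1/21 normalisation below.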

			adjustShadowValue1K( dp, texture2D( shadowMap[ i ], cubeToUV( bd3D + Gsd.zzz * os, texelSizeY ) ), shadowBias[ i ], shadow );
			adjustShadowValue1K( dp, texture2D( shadowMap[ i ], cubeToUV( bd3D + Gsd.zxz * os, texelSizeY ) ), shadowBias[ i ], shadow );
			adjustShadowValue1K( dp, texture2D( shadowMap[ i ], cubeToUV( bd3D + Gsd.xxz * os, texelSizeY ) ), shadowBias[ i ], shadow );
			adjustShadowValue1K( dp, texture2D( shadowMap[ i ], cubeToUV( bd3D + Gsd.xzz * os, texelSizeY ) ), shadowBias[ i ], shadow );
			adjustShadowValue1K( dp, texture2D( shadowMap[ i ], cubeToUV( bd3D + Gsd.zzx * os, texelSizeY ) ), shadowBias[ i ], shadow );
			adjustShadowValue1K( dp, texture2D( shadowMap[ i ], cubeToUV( bd3D + Gsd.zxx * os, texelSizeY ) ), shadowBias[ i ], shadow );
			adjustShadowValue1K( dp, texture2D( shadowMap[ i ], cubeToUV( bd3D + Gsd.xxx * os, texelSizeY ) ), shadowBias[ i ], shadow );
			adjustShadowValue1K( dp, texture2D( shadowMap[ i ], cubeToUV( bd3D + Gsd.xzx * os, texelSizeY ) ), shadowBias[ i ], shadow );
			adjustShadowValue1K( dp, texture2D( shadowMap[ i ], cubeToUV( bd3D + Gsd.zzy * os, texelSizeY ) ), shadowBias[ i ], shadow );
			adjustShadowValue1K( dp, texture2D( shadowMap[ i ], cubeToUV( bd3D + Gsd.zxy * os, texelSizeY ) ), shadowBias[ i ], shadow );

			adjustShadowValue1K( dp, texture2D( shadowMap[ i ], cubeToUV( bd3D + Gsd.xxy * os, texelSizeY ) ), shadowBias[ i ], shadow );
			adjustShadowValue1K( dp, texture2D( shadowMap[ i ], cubeToUV( bd3D + Gsd.xzy * os, texelSizeY ) ), shadowBias[ i ], shadow );
			adjustShadowValue1K( dp, texture2D( shadowMap[ i ], cubeToUV( bd3D + Gsd.zyz * os, texelSizeY ) ), shadowBias[ i ], shadow );
			adjustShadowValue1K( dp, texture2D( shadowMap[ i ], cubeToUV( bd3D + Gsd.xyz * os, texelSizeY ) ), shadowBias[ i ], shadow );
			adjustShadowValue1K( dp, texture2D( shadowMap[ i ], cubeToUV( bd3D + Gsd.zyx * os, texelSizeY ) ), shadowBias[ i ], shadow );
			adjustShadowValue1K( dp, texture2D( shadowMap[ i ], cubeToUV( bd3D + Gsd.xyx * os, texelSizeY ) ), shadowBias[ i ], shadow );
			adjustShadowValue1K( dp, texture2D( shadowMap[ i ], cubeToUV( bd3D + Gsd.yzz * os, texelSizeY ) ), shadowBias[ i ], shadow );
			adjustShadowValue1K( dp, texture2D( shadowMap[ i ], cubeToUV( bd3D + Gsd.yxz * os, texelSizeY ) ), shadowBias[ i ], shadow );
			adjustShadowValue1K( dp, texture2D( shadowMap[ i ], cubeToUV( bd3D + Gsd.yxx * os, texelSizeY ) ), shadowBias[ i ], shadow );
			adjustShadowValue1K( dp, texture2D( shadowMap[ i ], cubeToUV( bd3D + Gsd.yzx * os, texelSizeY ) ), shadowBias[ i ], shadow );

			shadow *= realShadowDarkness * ( 1.0 / 21.0 );

	#else 
			vec3 bd3D = normalize( lightToPosition );
			float dp = length( lightToPosition );

			adjustShadowValue1K( dp, texture2D( shadowMap[ i ], cubeToUV( bd3D, texelSizeY ) ), shadowBias[ i ], shadow );

			shadow *= realShadowDarkness;

	#endif

		} else {

#endif 
			float texelSizeX =  1.0 / shadowMapSize[ i ].x;

			vec3 shadowCoord = vShadowCoord[ i ].xyz / vShadowCoord[ i ].w;


			bvec4 inFrustumVec = bvec4 ( shadowCoord.x >= 0.0, shadowCoord.x <= 1.0, shadowCoord.y >= 0.0, shadowCoord.y <= 1.0 );
			bool inFrustum = all( inFrustumVec );

			bvec2 frustumTestVec = bvec2( inFrustum, shadowCoord.z <= 1.0 );

			bool frustumTest = all( frustumTestVec );

			if ( frustumTest ) {

	#if defined( SHADOWMAP_TYPE_PCF )


				/*
					for ( float y = -1.25; y <= 1.25; y += 1.25 )
						for ( float x = -1.25; x <= 1.25; x += 1.25 ) {
							vec4 rgbaDepth = texture2D( shadowMap[ i ], vec2( x * xPixelOffset, y * yPixelOffset ) + shadowCoord.xy );
							float fDepth = unpackDepth( rgbaDepth );
							if ( fDepth < shadowCoord.z )
								shadow += 1.0;
					}
					shadow /= 9.0;
				*/

				shadowCoord.z += shadowBias[ i ];

				const float ShadowDelta = 1.0 / 9.0;

				float xPixelOffset = texelSizeX;
				float yPixelOffset = texelSizeY;

				float dx0 = - 1.25 * xPixelOffset;
				float dy0 = - 1.25 * yPixelOffset;
				float dx1 = 1.25 * xPixelOffset;
				float dy1 = 1.25 * yPixelOffset;

				float fDepth = unpackDepth( texture2D( shadowMap[ i ], shadowCoord.xy + vec2( dx0, dy0 ) ) );
				if ( fDepth < shadowCoord.z ) shadow += ShadowDelta;

				fDepth = unpackDepth( texture2D( shadowMap[ i ], shadowCoord.xy + vec2( 0.0, dy0 ) ) );
				if ( fDepth < shadowCoord.z ) shadow += ShadowDelta;

				fDepth = unpackDepth( texture2D( shadowMap[ i ], shadowCoord.xy + vec2( dx1, dy0 ) ) );
				if ( fDepth < shadowCoord.z ) shadow += ShadowDelta;

				fDepth = unpackDepth( texture2D( shadowMap[ i ], shadowCoord.xy + vec2( dx0, 0.0 ) ) );
				if ( fDepth < shadowCoord.z ) shadow += ShadowDelta;

				fDepth = unpackDepth( texture2D( shadowMap[ i ], shadowCoord.xy ) );
				if ( fDepth < shadowCoord.z ) shadow += ShadowDelta;

				fDepth = unpackDepth( texture2D( shadowMap[ i ], shadowCoord.xy + vec2( dx1, 0.0 ) ) );
				if ( fDepth < shadowCoord.z ) shadow += ShadowDelta;

				fDepth = unpackDepth( texture2D( shadowMap[ i ], shadowCoord.xy + vec2( dx0, dy1 ) ) );
				if ( fDepth < shadowCoord.z ) shadow += ShadowDelta;

				fDepth = unpackDepth( texture2D( shadowMap[ i ], shadowCoord.xy + vec2( 0.0, dy1 ) ) );
				if ( fDepth < shadowCoord.z ) shadow += ShadowDelta;

				fDepth = unpackDepth( texture2D( shadowMap[ i ], shadowCoord.xy + vec2( dx1, dy1 ) ) );
				if ( fDepth < shadowCoord.z ) shadow += ShadowDelta;

				shadow *= shadowDarkness[ i ];

	#elif defined( SHADOWMAP_TYPE_PCF_SOFT )


				shadowCoord.z += shadowBias[ i ];

				float xPixelOffset = texelSizeX;
				float yPixelOffset = texelSizeY;

				float dx0 = - 1.0 * xPixelOffset;
				float dy0 = - 1.0 * yPixelOffset;
				float dx1 = 1.0 * xPixelOffset;
				float dy1 = 1.0 * yPixelOffset;

				mat3 shadowKernel;
				mat3 depthKernel;

				depthKernel[ 0 ][ 0 ] = unpackDepth( texture2D( shadowMap[ i ], shadowCoord.xy + vec2( dx0, dy0 ) ) );
				depthKernel[ 0 ][ 1 ] = unpackDepth( texture2D( shadowMap[ i ], shadowCoord.xy + vec2( dx0, 0.0 ) ) );
				depthKernel[ 0 ][ 2 ] = unpackDepth( texture2D( shadowMap[ i ], shadowCoord.xy + vec2( dx0, dy1 ) ) );
				depthKernel[ 1 ][ 0 ] = unpackDepth( texture2D( shadowMap[ i ], shadowCoord.xy + vec2( 0.0, dy0 ) ) );
				depthKernel[ 1 ][ 1 ] = unpackDepth( texture2D( shadowMap[ i ], shadowCoord.xy ) );
				depthKernel[ 1 ][ 2 ] = unpackDepth( texture2D( shadowMap[ i ], shadowCoord.xy + vec2( 0.0, dy1 ) ) );
				depthKernel[ 2 ][ 0 ] = unpackDepth( texture2D( shadowMap[ i ], shadowCoord.xy + vec2( dx1, dy0 ) ) );
				depthKernel[ 2 ][ 1 ] = unpackDepth( texture2D( shadowMap[ i ], shadowCoord.xy + vec2( dx1, 0.0 ) ) );
				depthKernel[ 2 ][ 2 ] = unpackDepth( texture2D( shadowMap[ i ], shadowCoord.xy + vec2( dx1, dy1 ) ) );

				vec3 shadowZ = vec3( shadowCoord.z );
				shadowKernel[ 0 ] = vec3( lessThan( depthKernel[ 0 ], shadowZ ) );
				shadowKernel[ 0 ] *= vec3( 0.25 );

				shadowKernel[ 1 ] = vec3( lessThan( depthKernel[ 1 ], shadowZ ) );
				shadowKernel[ 1 ] *= vec3( 0.25 );

				shadowKernel[ 2 ] = vec3( lessThan( depthKernel[ 2 ], shadowZ ) );
				shadowKernel[ 2 ] *= vec3( 0.25 );

				vec2 fractionalCoord = 1.0 - fract( shadowCoord.xy * shadowMapSize[ i ].xy );
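				// Bilinearly blend the 3x3 depth-comparison results using the sub-texel position (soft PCF).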

				shadowKernel[ 0 ] = mix( shadowKernel[ 1 ], shadowKernel[ 0 ], fractionalCoord.x );
				shadowKernel[ 1 ] = mix( shadowKernel[ 2 ], shadowKernel[ 1 ], fractionalCoord.x );

				vec4 shadowValues;
				shadowValues.x = mix( shadowKernel[ 0 ][ 1 ], shadowKernel[ 0 ][ 0 ], fractionalCoord.y );
				shadowValues.y = mix( shadowKernel[ 0 ][ 2 ], shadowKernel[ 0 ][ 1 ], fractionalCoord.y );
				shadowValues.z = mix( shadowKernel[ 1 ][ 1 ], shadowKernel[ 1 ][ 0 ], fractionalCoord.y );
				shadowValues.w = mix( shadowKernel[ 1 ][ 2 ], shadowKernel[ 1 ][ 1 ], fractionalCoord.y );

				shadow = dot( shadowValues, vec4( 1.0 ) ) * shadowDarkness[ i ];

	#else 
				shadowCoord.z += shadowBias[ i ];

				vec4 rgbaDepth = texture2D( shadowMap[ i ], shadowCoord.xy );
				float fDepth = unpackDepth( rgbaDepth );

				if ( fDepth < shadowCoord.z )
					shadow = shadowDarkness[ i ];

	#endif

			}

#ifdef SHADOWMAP_DEBUG

			if ( inFrustum ) {

				if ( i == 0 ) {

					outgoingLight *= vec3( 1.0, 0.5, 0.0 );

				} else if ( i == 1 ) {

					outgoingLight *= vec3( 0.0, 1.0, 0.8 );

				} else {

					outgoingLight *= vec3( 0.0, 0.5, 1.0 );

				}

			}

#endif

#if defined( POINT_LIGHT_SHADOWS )

		}

#endif

		shadowMask = shadowMask * vec3( 1.0 - shadow );

	}

#endif

	outgoingLight = diffuseColor.rgb * totalAmbientLight * shadowMask;
#ifdef USE_ENVMAP

	#if defined( USE_BUMPMAP ) || defined( USE_NORMALMAP ) || defined( PHONG )

		vec3 cameraToVertex = normalize( vWorldPosition - cameraPosition );

		vec3 worldNormal = inverseTransformDirection( normal, viewMatrix );

		#ifdef ENVMAP_MODE_REFLECTION

			vec3 reflectVec = reflect( cameraToVertex, worldNormal );

		#else

			vec3 reflectVec = refract( cameraToVertex, worldNormal, refractionRatio );

		#endif

	#else

		vec3 reflectVec = vReflect;

	#endif

	#ifdef DOUBLE_SIDED
		float flipNormal = ( float( gl_FrontFacing ) * 2.0 - 1.0 );
	#else
		float flipNormal = 1.0;
	#endif

	#ifdef ENVMAP_TYPE_CUBE
		vec4 envColor = textureCube( envMap, flipNormal * vec3( flipEnvMap * reflectVec.x, reflectVec.yz ) );

	#elif defined( ENVMAP_TYPE_EQUIREC )
		vec2 sampleUV;
		sampleUV.y = saturate( flipNormal * reflectVec.y * 0.5 + 0.5 );
		sampleUV.x = atan( flipNormal * reflectVec.z, flipNormal * reflectVec.x ) * RECIPROCAL_PI2 + 0.5;
		vec4 envColor = texture2D( envMap, sampleUV );

	#elif defined( ENVMAP_TYPE_SPHERE )
		vec3 reflectView = flipNormal * normalize((viewMatrix * vec4( reflectVec, 0.0 )).xyz + vec3(0.0,0.0,1.0));
		vec4 envColor = texture2D( envMap, reflectView.xy * 0.5 + 0.5 );
	#endif

	envColor.xyz = inputToLinear( envColor.xyz );

	#ifdef ENVMAP_BLENDING_MULTIPLY

		outgoingLight = mix( outgoingLight, outgoingLight * envColor.xyz, specularStrength * reflectivity );

	#elif defined( ENVMAP_BLENDING_MIX )

		outgoingLight = mix( outgoingLight, envColor.xyz, specularStrength * reflectivity );

	#elif defined( ENVMAP_BLENDING_ADD )

		outgoingLight += envColor.xyz * specularStrength * reflectivity;

	#endif

#endif


	outgoingLight = linearToOutput( outgoingLight );

#ifdef USE_FOG

	#ifdef USE_LOGDEPTHBUF_EXT

		float depth = gl_FragDepthEXT / gl_FragCoord.w;

	#else

		float depth = gl_FragCoord.z / gl_FragCoord.w;

	#endif

	#ifdef FOG_EXP2

		float fogFactor = whiteCompliment( exp2( - fogDensity * fogDensity * depth * depth * LOG2 ) );

	#else

		float fogFactor = smoothstep( fogNear, fogFar, depth );

	#endif
	
	outgoingLight = mix( outgoingLight, fogColor, fogFactor );

#endif
	gl_FragColor = vec4( outgoingLight, diffuseColor.a );
}
Example no. 25
void main()
{
  //Calculate the ray direction using viewport information
  vec3 rayDirection = frag_worldpos - RayOrigin;
  rayDirection = normalize(rayDirection);
  
  //Cube ray intersection test
  vec3 invR = 1.0 / rayDirection;
  vec3 tbot = invR * (volumeMin - RayOrigin);
  vec3 ttop = invR * (volumeMax - RayOrigin);
  
  //Now sort all elements of tbot and ttop to find the two min and max elements
  vec3 tmin = min(ttop, tbot); //Closest planes
  vec2 t = max(tmin.xx, tmin.yz); //Out of the closest planes, find the last to be entered (collision point)
  float tnear = max(t.x, t.y);//...
  
  //If the viewpoint is penetrating the volume, make sure to only cast the ray
  //from the eye position, not behind it
  tnear = max(0.0, tnear);
  
  //Now work out when the ray will leave the volume
  vec3 tmax = max(ttop, tbot); //Distant planes
  t = min(tmax.xx, tmax.yz);//Find the first plane to be exited
  float tfar = min(t.x, t.y);//...
  
  //Check what the screen depth is to make sure we don't sample the
  //volume past any standard GL objects
  float bufferDepth = texelFetch(DepthTexture, ivec2(gl_FragCoord.xy), 0).r;
  float depth = recalcZCoord(bufferDepth);
  tfar = min(depth, tfar);
  
  //We need to calculate the ray's starting position. We add a random
  //fraction of the stepsize to the original starting point to dither
  //the output
  float starting_offset = tnear; 

  if (DitherRay != 0)
    starting_offset += StepSize * fract(sin(gl_FragCoord.x * 12.9898 + gl_FragCoord.y * 78.233) * 43758.5453);

  vec3 rayPos = RayOrigin + rayDirection * starting_offset;
  
  //The color accumulation variable
  vec4 color = vec4(0.0, 0.0, 0.0, 0.0);
  
  //Start the sampling by initialising the ray variables. We take the
  //first sample ready to integrate to the next sample
  vec4 first_sample = texture(DataTexture, (rayPos + 1.0) * 0.5);

  float lastsamplea = first_sample.a;
  vec4 lastTransfer = texture(IntTransferTexture, lastsamplea);
  vec3 lastnorm = first_sample.xyz * 2.0 - vec3(1.0);
  float lastnorm_length = length(lastnorm);
  lastnorm = (lastnorm_length == 0) ? -rayDirection : lastnorm / lastnorm_length;

  //Make this into the ray position step vector
  rayDirection *= StepSize;

  rayPos += rayDirection;

  for (float length = tfar - tnear; length > 0.0;
       length -= StepSize, rayPos += rayDirection)
    {
      //Grab the volume sample
      vec4 sample = texture(DataTexture, (rayPos - volumeMin) * invVolumeDimensions);
      float delta = sample.a - lastsamplea;
      vec4 transfer = texture(IntTransferTexture, sample.a);
      float deltaT = transfer.a - lastTransfer.a;
      vec3 deltaK = transfer.rgb - lastTransfer.rgb;

      vec4 src;
      if (delta == 0.0)
	{ //Special case where the integration breaks down, just use the constant val.
	  src = texture(TransferTexture, sample.a);
	  src.a = (1.0 - exp( - StepSize * src.a));
	}
      else
	{
	  /*Pre-Integrated color calc*/
	  float opacity = 1.0 - exp( - deltaT * StepSize / delta);
	  vec3 color = abs(deltaK) / (abs(deltaT) + 1.0e-10);
	  src = vec4(color, opacity);
	}

      lastTransfer = transfer;
      lastsamplea = sample.a;

      ////////////Lighting calculations
      //We perform all the calculations in the eye space, The normal
      //from the previous step is used, as it is the normal in the
      //direction the ray entered the volume.
      vec3 norm = (ViewMatrix * vec4(lastnorm, 0.0)).xyz;      
      src.rgb = calcLighting((ViewMatrix * vec4(rayPos,1.0)).xyz, norm, src.rgb);

      //Update the lastnormal with the new normal, if it is valid
      norm = sample.xyz * 2.0 - vec3(1.0);
      //Test if we've got a bad normal and need to reuse the old one
      float sqrnormlength = dot(norm,norm);
      norm /= sqrt(sqrnormlength);
      if (sqrnormlength >= 0.01)
	lastnorm = norm;

      ///////////Front to back blending
      src.rgb *= src.a;
      color = (1.0 - color.a) * src + color;
  
      //We only accumulate up to 0.95 alpha (the blending never
      //reaches 1).
      if (color.a >= 0.95)
  	{
  	  //We have to renormalize the color by the alpha value (see
  	  //below)
  	  color.rgb /= color.a;
  	  //Set the alpha to one to make sure the pixel is not transparent
  	  color.a = 1.0;
  	  break;
  	}
    }
  /*We must renormalize the color by the alpha value. For example, if
  our ray only hits just one white voxel with a alpha of 0.5, we will have
  
  src.rgb = vec4(1,1,1,0.5)
  
  src.rgb *= src.a; 
  //which gives, src.rgb = 0.5 * src.rgb = vec4(0.5,0.5,0.5,0.5)
  
  color = (1.0 - color.a) * src + color;
  //which gives, color = (1.0 - 0) * vec4(0.5,0.5,0.5,0.5) + vec4(0,0,0,0) = vec4(0.5,0.5,0.5,0.5)
  
  So the final color of the ray is half way between white and black, but the voxel it hit was white!
  The solution is to divide by the alpha, as this is the "amount of color" added to color.
  */
  color.rgb /= float(color.a == 0.0) + color.a;
  color_out = color;
}
Example no. 26
static inline vec3 interpolatePath(ObjLoader *path, vec3 alt, float pos)
{
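	// pos in [0,1] selects a segment of the path; interpolate within that segment,
	// then blend toward `alt` by myx(pos) (presumably a custom weight function).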
	float vertIdx = pos * (path->getCount() - 1);
	vec3 *verts = path->getVerts3fv() + int(floor(vertIdx));
	return mix(mix(verts[0], verts[1], fract(vertIdx)), alt, myx(pos));
}
Example no. 27
fract operator* (const int &fr) {
  return fract(this->numerator * fr, this->denominator);
}
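For context, a minimal sketch of a fraction type this operator could belong to; the member names are taken from the snippet above, everything else is assumed.

struct fract {
    int numerator;
    int denominator;

    fract(int n, int d) : numerator(n), denominator(d) {}

    // Scale the numerator only: (n/d) * k == (n*k)/d.
    fract operator*(const int &fr) const {
        return fract(this->numerator * fr, this->denominator);
    }
};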
Example no. 28
/**
 * RNG for shadow dithering
 */
float rnd(vec2 p) {
	return fract(sin(dot(p, vec2(12.9898, 78.233)))*43758.5453);
}
Example no. 29
float rand(vec2 co){
    return fract(sin(dot(co.xy ,vec2(12.9898,78.233))) * 43758.5453);
}
vec4 randomIteration(vec4 seed)
{
   vec4 adder = vec4(0.735, 0.369, 0.438, 0.921);
   vec4 mult = vec4(9437.4, 7213.5, 5935.72, 4951.6);
   return fract((seed.zxwy + adder) * mult);
}