Code example #1
File: image.cpp Project: elegabriel/myzju
void Image::SaveTGA(const char *filename) const {
    assert(filename != NULL);
    // must end in .tga
    const char *ext = &filename[strlen(filename)-4];
    assert(!strcmp(ext,".tga"));
    FILE *file = fopen(filename,"wb");
    // misc header information
    for (int i = 0; i < 18; i++) {
        //unsigned char tmp;
        if (i == 2) WriteByte(file,2);                // image type: uncompressed true-color
        else if (i == 12) WriteByte(file,width%256);  // image width, low byte
        else if (i == 13) WriteByte(file,width/256);  // image width, high byte
        else if (i == 14) WriteByte(file,height%256); // image height, low byte
        else if (i == 15) WriteByte(file,height/256); // image height, high byte
        else if (i == 16) WriteByte(file,24);         // bits per pixel
        else if (i == 17) WriteByte(file,32);         // image descriptor: top-left origin
        else WriteByte(file,0);                       // all other header bytes are zero
    }
    // the data
    // flip y so that (0,0) is bottom left corner
    for (int y = height-1; y >= 0; y--) {
        for (int x = 0; x < width; x++) {
            Vec3f v = GetPixel(x,y);
            // note reversed order: b, g, r
            WriteByte(file,ClampColorComponent(v.b()));
            WriteByte(file,ClampColorComponent(v.g()));
            WriteByte(file,ClampColorComponent(v.r()));
        }
    }
    fclose(file);
}
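
SaveTGA (and SavePPM in example #9 below) rely on WriteByte and ClampColorComponent, which are not included in these listings. A minimal sketch of what they might look like, assuming color components are stored as floats in [0, 1] and clamped to one byte:

// Hypothetical helpers, not part of the original listings.
#include <cstdio>

static void WriteByte(FILE *file, unsigned char b) {
    fwrite(&b, sizeof(unsigned char), 1, file);   // write one raw byte
}

static unsigned char ClampColorComponent(float c) {
    int tmp = int(c * 255.0f + 0.5f);             // scale and round
    if (tmp < 0) tmp = 0;                         // clamp to the valid byte range
    if (tmp > 255) tmp = 255;
    return (unsigned char)tmp;
}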
Code example #2
void GLCanvas::display(void) {
	glDrawBuffer(GL_BACK);

	Vec3f bg = mesh->background_color;
	// Clear the display buffer, set it to the background color
	glClearColor(bg.r(),bg.g(),bg.b(),0);
	glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Set the camera parameters
	mesh->camera->glInit(args->width, args->height);
	glMatrixMode(GL_MODELVIEW);
	glLoadIdentity();
	mesh->camera->glPlaceCamera();
	InitLight(); // light will be a headlamp!

	if (args->intersect_backfacing)
		glDisable(GL_CULL_FACE);
	else
		glEnable(GL_CULL_FACE);

	glEnable(GL_LIGHTING);
	glEnable(GL_DEPTH_TEST);
	
	//	glCallList(display_list_index);
	HandleGLError(); 

	radiosity->drawVBOs();
	photon_mapping->drawVBOs();
	RayTree::drawVBOs();
	 
	// Swap the back buffer with the front buffer to display
	// the scene
	glutSwapBuffers();
}
Code example #3
File: image.cpp Project: farhanrahman/AGCW
void Image::SetPixel(unsigned int x, unsigned int y, const Vec3f &color){
	if(x < width && y < height){
		// row-major layout: y selects the row, x the pixel within the row
		unsigned int index = (y*width + x)*numComponents;
		buffer[index] = color.r();
		buffer[index+1] = color.g();
		buffer[index+2] = color.b();
	}
}
Code example #4
File: image.cpp Project: farhanrahman/AGCW
void Image::SetPixel(unsigned int x, unsigned int y, Vec3f const &color) {
    if(x < width && y < height) {
        unsigned int index = (y*width + x)*numComponents;
        buffer[index] = color.r();
        buffer[index+1] = color.g();
        buffer[index+2] = color.b();
    }
}
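
Both SetPixel variants store pixel (x, y) at offset (y*width + x)*numComponents in a flat, row-major buffer. A hypothetical GetPixel counterpart for this Image class, not part of the original project, assuming the same layout with numComponents == 3 (r, g, b):

// Hypothetical sketch; mirrors the bounds handling of SetPixel above.
Vec3f Image::GetPixel(unsigned int x, unsigned int y) const {
    if (x >= width || y >= height)
        return Vec3f(0.0f, 0.0f, 0.0f);               // out of range: return black
    unsigned int index = (y*width + x)*numComponents; // row-major offset
    return Vec3f(buffer[index], buffer[index+1], buffer[index+2]);
}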
Code example #5
File: image.cpp Project: farhanrahman/AGCW
void Image::SetAllPixels(const Vec3f &color){
	for (unsigned int i = 0; i < height; ++i) {
		for (unsigned int j = 0; j < width; ++j) {
			unsigned int index = (i*width + j)*numComponents;
			buffer[index] = color.r();
			buffer[index+1] = color.g();
			buffer[index+2] = color.b();
		}
	}
}
Code example #6
// Process the checker-color data collected under different white balance modes.
// Assuming the daylight CCT is set to 5200K, compute the CCT of the other
// white balance modes.
void WhiteBalanceTest::processData() {
    ALOGV("Start Processing White Balance Test Data!");

    int numPatches = mCheckerColors.size();
    ALOGV("Processing %d tests with %d patches", 2, numPatches);

    std::vector<Vec3f> xyzColors(numPatches);
    for (int j = 0; j < numPatches; ++j) {
        Vec3f xyzCheckerColor = initializeFromRGB(mCheckerColors[j]);
        xyzColors[j] = xyzCheckerColor;
        ALOGV("XYZ coordinate is %f, %f, %f", xyzCheckerColor.r(),
              xyzCheckerColor.g(), xyzCheckerColor.b());
    }

    Vec3f meanScale(0.f, 0.f, 0.f);

    if (mMode == "daylight") {
        mXyzColorsDaylight = xyzColors;
        // For testing the auto white balance mode. Compute a CCT that would
        // map the gray checkers to a white point.
        for (int j = 1; j < numPatches; ++j) {
            meanScale = meanScale +
                    (mXyzColorsDaylight[j] / kDaylightWhitePoint);
        }
    } else {
        for (int j = 1; j < numPatches; ++j) {
            meanScale = meanScale + (mXyzColorsDaylight[j] / xyzColors[j]);
        }
    }

    meanScale = meanScale / (numPatches - 1);
    ALOGV("Scale: %f, %f, %f", meanScale.r(), meanScale.g(), meanScale.b());

    Vec3f whitePoint;
    whitePoint = meanScale * kDaylightWhitePoint;

    ALOGV("White point is %f, %f, %f", whitePoint.r(),
         whitePoint.g(), whitePoint.b());

    mCorrelatedColorTemp = findCorrelatedColorTemp(whitePoint);
    ALOGV("CCT is %d", mCorrelatedColorTemp);
}
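
Written as a formula, the non-daylight branch above averages the component-wise ratio of the stored daylight XYZ colors to the current mode's XYZ colors over the gray patches (patch 0 is skipped), then scales the daylight white point by that mean; the daylight branch divides by kDaylightWhitePoint instead:

\[
\bar{s} = \frac{1}{N-1}\sum_{j=1}^{N-1} \frac{C_j^{\text{daylight}}}{C_j},
\qquad
W = \bar{s} \odot W_{\text{daylight}}
\]

where N is numPatches, C_j is the XYZ color of patch j, the division and \odot are component-wise, and W_daylight is kDaylightWhitePoint.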
Code example #7
// Given a white point, find the correlated color temperature.
// Formula taken from the paper "Calculating Correlated Color Temperatures
// Across the Entire Gamut of Daylight and Skylight Chromaticities" by
// Hernández-Andrés et al. (1999). The numbers are fitting parameters.
int WhiteBalanceTest::findCorrelatedColorTemp(const Vec3f &whitePoint) {
    Vec2f chromaOfWhitePoint(
        whitePoint.r() / (whitePoint.r() + whitePoint.g() + whitePoint.b()),
        whitePoint.g() / (whitePoint.r() + whitePoint.g() + whitePoint.b()));

    float n = (chromaOfWhitePoint.x() - 0.3366f)
                / (chromaOfWhitePoint.y() - 0.1735f);
    float y = -949.86315f + 6253.80338f * exp(-n / 0.92159f)
               + 28.70599f * exp(-n / 0.20039f) + 0.00004f * exp(-n / 0.07125f);

    return static_cast<int>(y);
}
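
For reference, the fitted formula this function implements, with (x, y) the chromaticity coordinates computed from the white point:

\[
n = \frac{x - 0.3366}{y - 0.1735}, \qquad
\mathrm{CCT} = -949.86315 + 6253.80338\,e^{-n/0.92159} + 28.70599\,e^{-n/0.20039} + 0.00004\,e^{-n/0.07125}
\]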
Code example #8
// Converts an RGB pixel value to the XYZ color space.
Vec3f WhiteBalanceTest::initializeFromRGB(const Vec3f &rgb) {
    float linearRed = convertToLinear(rgb.r());
    float linearGreen = convertToLinear(rgb.g());
    float linearBlue = convertToLinear(rgb.b());

    float x = 0.4124f * linearRed + 0.3576f * linearGreen +
            0.1805f * linearBlue;
    float y = 0.2126f * linearRed + 0.7152f * linearGreen +
            0.0722f * linearBlue;
    float z = 0.0193f * linearRed + 0.1192f * linearGreen +
            0.9505f * linearBlue;

    return Vec3f(x, y, z);
}
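
The 3x3 matrix above is the standard linear-sRGB to XYZ (D65) matrix. convertToLinear is not shown in these listings; a minimal sketch, assuming it inverts the sRGB transfer curve on components normalized to [0, 1] (the actual test code may use a different curve):

// Hypothetical sketch of convertToLinear under the sRGB assumption.
#include <cmath>

static float convertToLinear(float c) {
    // Inverse of the standard sRGB encoding
    if (c <= 0.04045f)
        return c / 12.92f;
    return powf((c + 0.055f) / 1.055f, 2.4f);
}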
Code example #9
File: image.cpp Project: elegabriel/myzju
void Image::SavePPM(const char *filename) const {
    assert(filename != NULL);
    // must end in .ppm
    const char *ext = &filename[strlen(filename)-4];
    assert(!strcmp(ext,".ppm"));
    FILE *file = fopen(filename, "wb");  // binary P6 pixel data follows the header
    // misc header information
    assert(file != NULL);
    fprintf (file, "P6\n");
    fprintf (file, "# Creator: Image::SavePPM()\n");
    fprintf (file, "%d %d\n", width,height);
    fprintf (file, "255\n");
    // the data
    // flip y so that (0,0) is bottom left corner
    for (int y = height-1; y >= 0; y--) {
        for (int x=0; x<width; x++) {
            Vec3f v = GetPixel(x,y);
            fputc (ClampColorComponent(v.r()),file);
            fputc (ClampColorComponent(v.g()),file);
            fputc (ClampColorComponent(v.b()),file);
        }
    }
    fclose(file);
}
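
A hypothetical usage sketch tying the save routines together; the Image constructor signature and the pixel setup are assumptions, not taken from the listings:

// Assumed constructor Image(width, height); adjust to the project's actual API.
Image img(640, 480);
img.SetAllPixels(Vec3f(0.2f, 0.4f, 0.8f));
img.SavePPM("output.ppm");  // ASCII header "P6", comment, "640 480", "255", then raw RGB bytes
img.SaveTGA("output.tga");  // 18-byte header, then uncompressed 24-bit BGR rows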
Code example #10
File: rayTracer.cpp Project: perfect28/MIT-Graphics
Vec3f RayTracer::traceRay(Ray &ray, float tmin, int bounces, float weight,
	float indexOfRefraction, Hit &hit) const
{
	//printf("当前已有光线:\n");
	//RayTree::Print();

	Vec3f canswer;
	if (bounces > max_bounces)
		return Vec3f(0.0f, 0.0f, 0.0f); 
	Camera *camera = sceneParser->getCamera();
	Group *group = sceneParser->getGroup();
	int num_lights = sceneParser->getNumLights();
	Vec3f cambient = sceneParser->getAmbientLight();
	//It turned out the bug was here: once the ray hits a transformed object,
	//the values carried back in hit are those seen by the transformed ray,
	//not by the original ray. The fix: keep the distance unchanged and
	//recompute the intersection point from that distance.
	if (group->intersect(ray, hit, tmin)) // the ray hit something
	{
		if (is_view_ray)
		{
			RayTree::SetMainSegment(ray, 0, hit.getT());
			is_view_ray = false;
		}
		Vec3f cobject = hit.getMaterial()->getDiffuseColor();
		Vec3f hitPoint = hit.getIntersectionPoint();
		//ambient contribution
		canswer = cambient * cobject;
		Vec3f clight;    // color of the light
		Vec3f light_dir; // direction toward the light
		Vec3f normal_dir = hit.getNormal(); // surface normal at the intersection
		float distolight; // distance to the light source
		for (int i = 0; i < num_lights; i++)
		{
			Light *light = sceneParser->getLight(i);
			//light_dir : the direction to the light
			// getIllumination returns the direction toward the light, the light
			// color, and the distance to the light; the first argument is the hit point
			light->getIllumination(hitPoint, light_dir, clight, distolight);

			Ray ray2(hitPoint, light_dir);
			Vec3f init_normal(0, 0, 0);
			Hit hit2(distolight, NULL, init_normal);
			//shadow test
			if (shadow)
			{
				if (group->intersect(ray2, hit2, tmin)){
					RayTree::AddShadowSegment(ray2, 0, hit2.getT());
					continue;
				}
				RayTree::AddShadowSegment(ray2, 0, hit2.getT());
			}
			//cpixel  =  cambient * cobject + SUMi [ clamped(Li . N) * clighti * cobject ]
			//add the local shading contribution
			canswer = canswer + hit.getMaterial()->Shade(ray, hit, light_dir, clight);
		}

		//printf("当前已有光线:\n");
		//RayTree::Print();

		
		//reflection
		Material *material = hit.getMaterial();
		Vec3f rc = material->getReflectiveColor();
		if (rc.r() > 0 && rc.g() > 0 && rc.b() > 0)
		{
			Vec3f mirrorDir;
			Vec3f incoming = ray.getDirection();
			mirrorDir = mirrorDirection(normal_dir, incoming);
			// The ray weight is simply multiplied by the magnitude of the reflected color
			Ray ray3(hitPoint, mirrorDir);
			Vec3f init_normal(0, 0, 0);
			Hit hit3(distolight, NULL, init_normal);
			//(earlier bug: had forgotten to multiply by the material's own reflective color)
			canswer += traceRay(ray3, tmin, bounces + 1, weight*rc.Length(), indexOfRefraction, hit3)*rc;
			if (bounces + 1 < max_bounces)
				RayTree::AddReflectedSegment(ray3, 0, hit3.getT());
		}

		//printf("当前已有光线:\n");
		//RayTree::Print();


		//NOTE: everything from here on still has issues
		//refraction
		Vec3f transmitted;
		Vec3f tc = material->getTransparentColor();
		float index = material->getIndexOfRefraction();
		if (tc.r() > 0 && tc.g() > 0 && tc.b() > 0)
		{
			Vec3f init_normal(0, 0, 0);
			Hit hit4(distolight, NULL, init_normal);
			//After confirming the material is transparent, check whether the ray is
			//inside or outside the object, using the dot product of normal and incoming.
			Vec3f incoming = ray.getDirection();
			float judge = normal_dir.Dot3(incoming);
			if (judge < 0)//ray is outside the object
			{
				if (transmittedDirection(normal_dir, incoming, 1, index, transmitted))
				{
					Ray ray4(hitPoint, transmitted);
					canswer += traceRay(ray4, tmin, bounces+1, weight*rc.Length(), index, hit4)*tc;
					RayTree::AddTransmittedSegment(ray4, 0, hit4.getT());
				}
			}
			else//ray is inside the object
			{
				normal_dir.Negate();
				if (transmittedDirection(normal_dir, incoming, index, 1, transmitted))
				{
					Ray ray4(hitPoint, transmitted);
					canswer += traceRay(ray4, tmin, bounces+1, weight*rc.Length(), 1, hit4)*tc;
					RayTree::AddTransmittedSegment(ray4, 0, hit4.getT());
				}
			}
		}

		//printf("当前已有光线:\n");
		//RayTree::Print();

	}
	else
		canswer = sceneParser->getBackgroundColor();

	canswer.Clamp();
	return canswer;
}
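
traceRay calls mirrorDirection and transmittedDirection, which are not shown in this listing. A minimal sketch of the usual formulas (perfect mirror reflection, and Snell's law with a total-internal-reflection check); the signatures are inferred from the calls above, they are shown as free functions for illustration, and the Vec3f accessors (x(), y(), z(), Dot3(), Normalize()) are assumptions:

// Hypothetical sketches, not the project's actual implementations.
// Assumes normal and incoming are normalized and incoming points toward the surface.
#include <cmath>

static Vec3f mirrorDirection(const Vec3f &normal, const Vec3f &incoming) {
    // Perfect mirror reflection: r = i - 2 (i . n) n
    float d = incoming.Dot3(normal);
    Vec3f r(incoming.x() - 2.0f * d * normal.x(),
            incoming.y() - 2.0f * d * normal.y(),
            incoming.z() - 2.0f * d * normal.z());
    r.Normalize();
    return r;
}

static bool transmittedDirection(const Vec3f &normal, const Vec3f &incoming,
                                 float index_i, float index_t, Vec3f &transmitted) {
    // Snell's law; returns false on total internal reflection.
    float eta = index_i / index_t;
    float cos_i = -incoming.Dot3(normal);
    float k = 1.0f - eta * eta * (1.0f - cos_i * cos_i);
    if (k < 0.0f)
        return false;
    float s = eta * cos_i - sqrtf(k);
    transmitted = Vec3f(eta * incoming.x() + s * normal.x(),
                        eta * incoming.y() + s * normal.y(),
                        eta * incoming.z() + s * normal.z());
    transmitted.Normalize();
    return true;
}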