/**
 * Simulate one touch measurement along the given ray and report the
 * (noise-corrupted) contact point to the particle filter service.
 *
 * @param measurementAction  ray to cast (start/end points in world frame)
 * @param rayt               ray tracer used to intersect the ray with the part
 * @param pfilterAdd         service client for particle_filter::AddObservation
 * @param noiseStdDev        std deviation of Gaussian noise added to each
 *                           coordinate of the simulated contact point
 * @return 1 on success (observation sent), -1 if the ray missed the part.
 */
int simulateMeasurement(Ray measurementAction, RayTracer &rayt,
                        ros::ServiceClient pfilterAdd, double noiseStdDev) {
  // FIX: seed a proper PRNG once instead of drawing every sample straight
  // from std::random_device, which is slow and may be non-random on some
  // platforms.
  std::random_device rd;
  std::mt19937 gen(rd());
  std::normal_distribution<double> randn(0.0, noiseStdDev);

  tf::Point start = measurementAction.start;
  tf::Point end = measurementAction.end;

  double distToPart;
  if (!rayt.traceRay(measurementAction, distToPart)) {
    ROS_INFO("NO INTERSECTION, Skipping");
    return -1;
  }

  // Contact point: walk distToPart along the normalized ray direction.
  tf::Point intersection = start + (end - start).normalize() * (distToPart);
  std::cout << "Intersection at: " << intersection.getX() << " "
            << intersection.getY() << " " << intersection.getZ() << std::endl;

  tf::Point ray_dir(end.x() - start.x(), end.y() - start.y(), end.z() - start.z());
  ray_dir = ray_dir.normalize();

  geometry_msgs::Point obs;  // noisy observed contact point
  geometry_msgs::Point dir;  // unit direction of the measurement ray
  obs.x = intersection.getX() + randn(gen);
  obs.y = intersection.getY() + randn(gen);
  obs.z = intersection.getZ() + randn(gen);
  dir.x = ray_dir.x();
  dir.y = ray_dir.y();
  dir.z = ray_dir.z();

  particle_filter::AddObservation pfilter_obs;
  pfilter_obs.request.p = obs;
  pfilter_obs.request.dir = dir;
  if (!pfilterAdd.call(pfilter_obs)) {
    ROS_INFO("Failed to call add observation");
  }
  return 1;
}
int main( int argc, char* argv[] ) { // Fill in your implementation here. // This loop loops over each of the input arguments. // argNum is initialized to 1 because the first // "argument" provided to the program is actually the // name of the executable (in our case, "a4"). for( int argNum = 1; argNum < argc; ++argNum ) { std::cout << "Argument " << argNum << " is: " << argv[argNum] << std::endl; } int w, h ; //img size float depthMin, depthMax; char filename[80]; char output[80]; char depthOutput[80]; char normalsOutput[80]; bool depthMode = false, normalsMode = false, imageMode = false; int max_bounces = 0; float cutoff_weight; bool shadows = false; bool refraction = false; int uniform_samples = 0; int jitter_samples = 0; float box_filter_radius; bool render_samples = false; char* render_samples_outfile; int zoom_factor; for( int i = 0 ; i < argc ; i++) { if(!strcmp(argv[i], "-input")){ strcpy(filename, argv[i+1]); } else if(!strcmp(argv[i], "-size")){ w = atoi(argv[i+1]); h = atoi(argv[i+2]); } else if(!strcmp(argv[i], "-output")){ strcpy(output , argv[i+1]); imageMode = true; } else if(!strcmp(argv[i], "-depth")){ depthMode = true; depthMin = atof(argv[i+1]); depthMax = atof(argv[i+2]); strcpy(depthOutput , argv[i+3]); } else if(!strcmp(argv[i], "-normals")){ normalsMode = true; strcpy(normalsOutput , argv[i+1]); } else if(!strcmp(argv[i], "-bounces")){ max_bounces = atoi(argv[i+1]); } else if(!strcmp(argv[i], "-weight")){ cutoff_weight = atoi(argv[i+1]); } else if(!strcmp(argv[i], "-shadows")){ shadows = true; } else if(!strcmp(argv[i], "-uniform_samples")){ uniform_samples = atoi(argv[i+1]); } else if(!strcmp(argv[i], "-jittered_samples")){ jitter_samples = atoi(argv[i+1]); } else if(!strcmp(argv[i], "-box_filter")){ box_filter_radius = atof(argv[i+1]); } else if(!strcmp(argv[i], "-render_samples")){ // strcpy(render_samples_outfile, argv[i+1]); render_samples_outfile = argv[i+1]; zoom_factor = atoi(argv[i+2]); render_samples = true; } else if 
(!strcmp(argv[i], "-refraction")){ refraction = true; } } // First, parse the scene using SceneParser. // Then loop over each pixel in the image, shooting a ray // through that pixel and finding its intersection with // the scene. Write the color at the intersection to that // pixel in your output image. SceneParser sp = SceneParser(filename); RayTracer rt = RayTracer(&sp, max_bounces, cutoff_weight, shadows, refraction); Image image( w , h ); Image depth( w , h ); Image normals( w,h ); Camera* camera = sp.getCamera(); // Variables for anti-aliasing SampleDebugger *sd; Hit hit; int num_samples = max(uniform_samples, jitter_samples); if (render_samples) { // cout << "render samples - now making the sample_debugger" << endl; sd = new SampleDebugger(w, h, num_samples); } // cout << "now starting iteration through pixels" << endl; for( int j = 0 ; j < h ; j++ ) { for ( int i = 0 ; i < w ; i++ ) { // if (i > 144 && j > 43) {cout << "at beginning of loop i = " << i<< "j = " << j << endl;} Vector3f pixelColor; Vector3f normalVal; // if (i > 144 && j > 43) {cout << " checking num_samples" << endl;} if (num_samples > 0) { float grid_width = sqrt(num_samples); float max_offset = 1.0/grid_width; float offset = 0; // if (i > 144 && j > 43) {cout << " where is this getting stuck - jitter samples?" << endl;} if (jitter_samples > 0) { offset += (float)rand()/RAND_MAX * max_offset; } int count = 0; Vector3f color_sum = Vector3f(0.0, 0.0, 0.0); // if (i > 144 && j > 43) {cout << " where is this getting stuck - for loop?" 
<< endl;} for (int grid_x = 0; grid_x < grid_width; grid_x++) { for (int grid_y = 0; grid_y < grid_width; grid_y++) { // if (i > 144 && j > 43) {cout << " in second for loop: grid_x = " << grid_x << "grid y =" << grid_y << endl;} float xin = grid_x*max_offset + i + offset; float yin = grid_y*max_offset + j + offset; float normX = (float)((float)(xin-((float)w)/2))/((float)w/2); float normY = (float)((float)(yin-((float)h)/2))/((float)h/2); Ray ray = camera->generateRay( Vector2f( normX , normY ) ); hit = Hit(INFINITY, NULL, Vector3f(1,0,0)); Vector3f local_color = rt.traceRay(ray, camera->getTMin(), max_bounces, cutoff_weight, hit); color_sum += local_color; // if (i > 144 && j > 43) {cout << " where is this getting stuck first render sampels?" << endl;} if (render_samples) { cout << "1) count = " << count << endl; Vector2f offset_vec = Vector2f(max_offset*grid_x+offset, max_offset*grid_y+offset); sd->setSample(i, j, count, offset_vec, local_color); count++; } } } // if (i > 144 && j > 43) {cout << " where is this getting stuck - setting pixel color?" << endl;} pixelColor = color_sum/num_samples; } else { // float x = 2*((float)j/((float)w - 1.0f)) - 1.0f; // float y = 2*((float)i/((float)h - 1.0f)) - 1.0f; float x = (float)((float)(i+0.25-((float)w)/2))/((float)w/2); float y = (float)((float)(j+0.25-((float)h)/2))/((float)h/2); Ray ray = camera->generateRay( Vector2f( x , y ) ); // if (i > 144 && j > 43) {cout << " where is this getting stuck - tracing the ray?" << endl;} // group->intersect( ray , hit , camera->getTMin() ) ; hit = Hit(INFINITY, NULL, Vector3f(1,0,0)); Vector3f color_normal = rt.traceRay(ray, camera->getTMin(), max_bounces, cutoff_weight, hit); // if (i > 144 && j > 43) {cout << " made it through traceRay?" 
<< endl;} pixelColor = color_normal; // if( hit.getMaterial()==NULL){ //background // // pixelColor = Scene.getBackgroundColor(); // normalVal = Vector3f(0.0,0.0,0.0); // } // else{ // //ambient light // pixelColor = PhongMaterial::pointwiseDot( Scene.getAmbientLight(), hit.getMaterial()->getDiffuseColor()); // //defussion light // for( int i = 0 ; i < Scene.getNumLights(); i++){ // Light* light = Scene.getLight(i); // Vector3f dirToLight, lightColor ; // Vector3f position = ray.pointAtParameter(hit.getT()); // float dist = hit.getT(); // light->getIllumination( position , dirToLight , lightColor , dist); // // pixelColor += hit.getMaterial()->Shade( ray , hit , dirToLight , lightColor ) ; // } // // //normal map // Vector3f n = hit.getNormal(); // normalVal = Vector3f( abs(n[0]),abs(n[1]),abs(n[2])); // } } float d = clampedDepth( hit.getT(), depthMin , depthMax); // cout << "setting pixel for i,j = " << i << ", " << j << endl; depth.SetPixel( i , j , Vector3f(d,d,d)); image.SetPixel( i , j , pixelColor ); // if (i > 144) {cout << "where is this getting stuck?" << endl;} normalVal = hit.getNormal(); for (int k = 0; k < 3; k++) { // if (i > 144) {cout << "where is this getting stuck? in normals?" << endl;} normalVal[k] = fabs(normalVal[k]); } // if (i > 144) {cout << "where is this getting stuck? setting normals?" << endl;} normals.SetPixel( i , j , normalVal) ; // if (i > 144) {cout << "where is this getting stuck? redner samples??" << endl;} // if (i > 144) {cout << "where is this getting stuck? before starting the next loop?" << endl;} } } cout << "output = " << output << "should not be null!" << endl; if(imageMode){image.SaveTGA(output);} if( depthMode){ depth.SaveTGA(depthOutput);} if( normalsMode){ normals.SaveTGA(normalsOutput);} if (render_samples) { sd->renderSamples(render_samples_outfile, zoom_factor); } return 0; }
int main( int argc, char* argv[] ) { // Fill in your implementation here. // This loop loops over each of the input arguments. // argNum is initialized to 1 because the first // "argument" provided to the program is actually the // name of the executable (in our case, "a4"). string sceneInput; int sizeX; int sizeY; string outputFile; string normalFile; int depth1; int depth2; string depthFile; bool shadows = false; int bounces = 0; float weight = 0.1; int numSamples = 0; bool uniformSamples = true; bool jitteredSamples = false; float boxFilterRadius = 0.0f; bool antialiasing = false; string renderSamplesFile; int renderSamplesFactor = 1; for( int argNum = 1; argNum < argc; ++argNum ) { //std::cout << "Argument " << argNum << " is: " << argv[argNum] << std::endl; string arg = argv[argNum]; if (arg == "-input") { argNum++; sceneInput = argv[argNum]; } else if (arg == "-size") { argNum++; sscanf(argv[argNum], "%d", &sizeX); argNum++; sscanf(argv[argNum], "%d", &sizeY); } else if (arg == "-output") { argNum++; outputFile = argv[argNum]; } else if (arg == "-normals") { argNum++; normalFile = argv[argNum]; } else if (arg == "-depth") { argNum++; sscanf(argv[argNum], "%d", &depth1); argNum++; sscanf(argv[argNum], "%d", &depth2); argNum++; depthFile = argv[argNum]; } else if (arg == "-bounces") { argNum++; sscanf(argv[argNum], "%d", &bounces); } else if (arg == "-weight") { argNum++; sscanf(argv[argNum], "%f", &weight); } else if (arg == "-shadows") { shadows = true; } else if (arg == "-uniform_samples") { uniformSamples = true; argNum++; sscanf(argv[argNum], "%d", &numSamples); } else if (arg == "-jittered_samples") { jitteredSamples = true; argNum++; sscanf(argv[argNum], "%d", &numSamples); } else if (arg == "-box_filter") { argNum++; sscanf(argv[argNum], "%f", &boxFilterRadius); antialiasing = true; } else if (arg == "-render_samples") { argNum++; renderSamplesFile = argv[argNum]; argNum++; sscanf(argv[argNum], "%d", &renderSamplesFactor); } else { std::cout << 
"Argument not implemented " << argNum << " is: " << argv[argNum] << std::endl; } } assert(sceneInput != ""); SceneParser* sceneParser = new SceneParser( (char*)sceneInput.c_str() ); Camera* camera = sceneParser->getCamera(); Group* objects = sceneParser->getGroup(); // First, parse the scene using SceneParser. // Then loop over each pixel in the image, shooting a ray // through that pixel and finding its intersection with // the scene. Write the color at the intersection to that // pixel in your output image. float stepX = 2.0f/(sizeX); float stepY = 2.0f/(sizeY); float stepXStart = 3.0 * stepX / 8.0f; float stepYStart = 3.0 * stepY / 8.0f; int rootNumSamples = (int)sqrt(numSamples); float stepRoot = 1.0f / rootNumSamples; float stepRootStart = stepRoot / 2.0f; //float stepXrender = 2.0f/(sizeX - 1)/renderSamplesFactor; //float stepYrender = 2.0f/(sizeY - 1)/renderSamplesFactor; Image* output; Image* depth; Image* normal; SampleDebugger* render; if (outputFile != "") { output = new Image( sizeX, sizeY ); output->SetAllPixels( sceneParser->getBackgroundColor() ); } if (depthFile != "") { depth = new Image( sizeX, sizeY ); depth->SetAllPixels( Vector3f::ZERO ); } if (normalFile != "") { normal = new Image( sizeX, sizeY ); normal->SetAllPixels( Vector3f::ZERO ); } if (renderSamplesFile != "") { render = new SampleDebugger( sizeX, sizeY, numSamples ); } RayTracer rayTracer = RayTracer( sceneParser, bounces, weight, shadows ); for (int x = 0; x < sizeX; x++) { for (int y = 0; y < sizeY; y++) { Vector2f point = Vector2f(-1 + ((x + 0.5f) * stepX), -1 + ((y + 0.5f) * stepY)); Ray ray = camera->generateRay( point ); Hit hit = Hit(); float tmin = camera->getTMin(); if (renderSamplesFile != "") { for (int i = 0; i < numSamples; i++) { int row = floor((float)i / (float)rootNumSamples); int col = i % rootNumSamples; Vector2f offset = Vector2f( stepRootStart + col * stepRoot, stepRootStart + row * stepRoot); if (jitteredSamples) { offset = Vector2f( nextFloat(), nextFloat()); } 
Vector3f color = sceneParser->getBackgroundColor(); Ray renderRay = camera->generateRay( point - Vector2f(0.5f * stepX, 0.5f * stepY) + (offset) * Vector2f(stepX, stepY) ); Hit renderHit = Hit(); if (objects->intersect(renderRay, renderHit, tmin)) { color = rayTracer.traceRay( renderRay, tmin, 0, 1.0, renderHit, 1.0 ); } render->setSample( x, y, i, offset, color ); } } //cout << "testing ray at " << ray << tmin << endl; bool intersected = objects->intersect( ray, hit, tmin ); if (intersected || antialiasing) { //cout << "found an intersection for " << ray << "at " << hit.getT() << endl; if (outputFile != "") { Vector3f pixelColor = sceneParser->getBackgroundColor(); if (antialiasing) { Vector3f color = Vector3f( 0 ); for (int i = 0; i < numSamples; i++) { int row = floor((float)i / (float)rootNumSamples); int col = i % rootNumSamples; Vector2f offset = Vector2f( stepRootStart + col * stepRoot, stepRootStart + row * stepRoot); if (jitteredSamples) { offset = Vector2f( nextFloat(), nextFloat() ); } Vector3f aColor = sceneParser->getBackgroundColor(); Ray renderRay = camera->generateRay( point - Vector2f(0.5f * stepX, 0.5f * stepY) + (offset) * Vector2f(2.0 * boxFilterRadius * stepX, 2.0 * boxFilterRadius * stepY) ); Hit renderHit = Hit(); if (objects->intersect(renderRay, renderHit, tmin)) { aColor = rayTracer.traceRay( renderRay, tmin, 0, 1.0, renderHit, 1.0 ); } color += aColor; } pixelColor = color / numSamples; } else if (intersected) { pixelColor = rayTracer.traceRay( ray, tmin, 0, 1.0, hit, 1.0 ); } /* Vector3f pixelColor = sceneParser->getAmbientLight() * hit.getMaterial()->getDiffuseColor(); for (int i = 0; i < sceneParser->getNumLights(); i++) { Light* light = sceneParser->getLight(i); Vector3f p = ray.pointAtParameter( hit.getT() ); Vector3f dir = Vector3f(); Vector3f col = Vector3f(); float distance = 0; light->getIllumination(p, dir, col, distance); pixelColor += hit.getMaterial()->shade( ray, hit, dir, col ); } //cout << "final pixel color: "; 
//pixelColor.print(); //cout << endl; */ output->SetPixel(x, y, VecUtils::clamp(pixelColor)); } if (depthFile != "") { Vector3f clamped = VecUtils::clamp(Vector3f(hit.getT()), depth1, depth2); Vector3f grayscale = (Vector3f(depth2) - clamped) / (float)(depth2 - depth1); //clamped.print(); //grayscale.print(); depth->SetPixel(x, y, grayscale); } if (normalFile != "") { normal->SetPixel(x, y, VecUtils::absoluteValue(hit.getNormal()) ); } } } } if (outputFile != "") { output->SaveTGA( (char *)outputFile.c_str() ); } if (depthFile != "") { depth->SaveTGA( (char *)depthFile.c_str() ); //printf("depth %d %d\n", depth1, depth2); } if (normalFile != "") { normal->SaveTGA( (char *)normalFile.c_str() ); } if (renderSamplesFile != "") { render->renderSamples( (char *)renderSamplesFile.c_str(), renderSamplesFactor ); } return 0; }
int main(int argc, char **argv) { ros::init(argc, argv, "updating_particles"); ros::NodeHandle n; PlotRayUtils plt; RayTracer rayt; std::random_device rd; std::normal_distribution<double> randn(0.0,0.000001); ROS_INFO("Running..."); ros::Publisher pub_init = n.advertise<particle_filter::PFilterInit>("/particle_filter_init", 5); ros::ServiceClient srv_add = n.serviceClient<particle_filter::AddObservation>("/particle_filter_add"); ros::Duration(1).sleep(); // pub_init.publish(getInitialPoints(plt)); geometry_msgs::Point obs; geometry_msgs::Point dir; double radius = 0.00; int i = 0; //for(int i=0; i<20; i++){ while (i < NUM_TOUCHES) { // ros::Duration(1).sleep(); //tf::Point start(0.95,0,-0.15); //tf::Point end(0.95,2,-0.15); tf::Point start, end; // randomSelection(plt, rayt, start, end); fixedSelection(plt, rayt, start, end); Ray measurement(start, end); double distToPart; if(!rayt.traceRay(measurement, distToPart)){ ROS_INFO("NO INTERSECTION, Skipping"); continue; } tf::Point intersection(start.getX(), start.getY(), start.getZ()); intersection = intersection + (end-start).normalize() * (distToPart - radius); std::cout << "Intersection at: " << intersection.getX() << " " << intersection.getY() << " " << intersection.getZ() << std::endl; tf::Point ray_dir(end.x()-start.x(),end.y()-start.y(),end.z()-start.z()); ray_dir = ray_dir.normalize(); obs.x=intersection.getX() + randn(rd); obs.y=intersection.getY() + randn(rd); obs.z=intersection.getZ() + randn(rd); dir.x=ray_dir.x(); dir.y=ray_dir.y(); dir.z=ray_dir.z(); // obs.x=intersection.getX(); // obs.y=intersection.getY(); // obs.z=intersection.getZ(); // pub_add.publish(obs); // plt.plotCylinder(start, end, 0.01, 0.002, true); plt.plotRay(Ray(start, end)); // ros::Duration(1).sleep(); particle_filter::AddObservation pfilter_obs; pfilter_obs.request.p = obs; pfilter_obs.request.dir = dir; if(!srv_add.call(pfilter_obs)){ ROS_INFO("Failed to call add observation"); } ros::spinOnce(); 
while(!rayt.particleHandler.newParticles){ ROS_INFO_THROTTLE(10, "Waiting for new particles..."); ros::spinOnce(); ros::Duration(.1).sleep(); } i ++; } // std::ofstream myfile; // myfile.open("/home/shiyuan/Documents/ros_marsarm/diff.csv", std::ios::out|std::ios::app); // myfile << "\n"; // myfile.close(); // myfile.open("/home/shiyuan/Documents/ros_marsarm/time.csv", std::ios::out|std::ios::app); // myfile << "\n"; // myfile.close(); // myfile.open("/home/shiyuan/Documents/ros_marsarm/diff_trans.csv", std::ios::out|std::ios::app); // myfile << "\n"; // myfile.close(); // myfile.open("/home/shiyuan/Documents/ros_marsarm/diff_rot.csv", std::ios::out|std::ios::app); // myfile << "\n"; // myfile.close(); // myfile.open("/home/shiyuan/Documents/ros_marsarm/workspace_max.csv", std::ios::out|std::ios::app); // myfile << "\n"; // myfile.close(); // myfile.open("/home/shiyuan/Documents/ros_marsarm/workspace_min.csv", std::ios::out|std::ios::app); // myfile << "\n"; // myfile.close(); ROS_INFO("Finished all action"); }
int main() { outPut[0]="scene1.test"; outPut[1]="scene1-camera1.test"; outPut[2]="scene1-camera2.test"; outPut[3]="scene1-camera3.test"; outPut[4]="scene1-camera4.test"; outPut[5]="scene2-camera1.test"; outPut[6]="scene2-camera2.test"; outPut[7]="scene2-camera3.test"; outPut[8]="scene3.test"; outPut[9]="self.test"; outPut[10]="spheres.test"; outPut[11]="self1.test"; outPut[12]="spheres2.test"; outPut[13]="spheres3.test"; outPut[14]="scene3-2.test"; outPut[15]="self2.test"; outPut[16]="self3.test"; outPut[17]="self4.test"; outPut[18]="self2-1.test"; outPut[19]="self2-2.test"; outPut[20]="self4-2.test"; outPut[21]="self4-3.test"; outPut[22]="spheres4.test"; outPut[23]="self4-4.test"; outPut[24]="self4-5.test"; outPut[25]="self4-6.test"; outPut[26]="self2-3.test"; outPut[27]="self3-1.test"; outPut[28]="scene1-4.test"; outPut[29]="spheres4-1.test"; outPut[30]="spheres4-2.test"; outPut[31]="spheres4-3.test"; outPut[32]="spheres4-4.test"; outPut[33]="spheres4-5.test"; for (int names=0; names<TESTS; names++){ Film myImage; Camera myCamera; vector<Primitive> myPrimitives(1, Primitive()); vector<Triangle> myTriangles(1, Triangle()); Parser myParser; Sample currSample(0,0); Sampler mySampler; RayTracer myTracer; Ray currRay(vec3(0,0,0), vec3(0,0,0), vec3(0,0,0)); Color currColor; inputfile.open(outPut[names].c_str()); int *x,*y; x =(int*) malloc(sizeof(int)); y =(int*) malloc(sizeof(int)); myParser.initialparse(inputfile, x, y); myCamera.SetAspect(x, y); myImage.SetFilm(*x,*y); myImage.InitializeFilm(); mySampler.SetSamplerSize(*x, *y); myParser.parsefile(inputfile, &myCamera, &myTracer, &maxDepth); myTracer.SetDepth(maxDepth); cout<<"maxDepth: "<<maxDepth<<endl; assert(maxDepth>=2); while(mySampler.GetSample(&currSample)){ currColor.SetColor(0.0,0.0,0.0); // reset currColor to 0 every time myCamera.GenerateRay(currSample,&currRay); myTracer.traceRay(&currRay, 0, &currColor); myImage.Commit(currSample, currColor); } myImage.WriteImage(outPut[names]); inputfile.close(); 
delete x; delete y; cout << "finished " << outPut[names] << endl; } cout << "finished everything" << endl; return 0; }
/**
 * Render the scene into color / depth / normal images (each optional,
 * controlled by the corresponding output filename in args), save them as
 * PNG, and return a GL texture of the color image (0 if no color image was
 * requested).
 *
 * For every pixel, args.num_samples rays are traced and the color, depth and
 * normal visualizations are box-filtered (plain average of the samples).
 */
GLuint render(RayTracer& ray_tracer, SceneParser& scene, const Args& args) {
  auto image_pixels = Vec2i(args.width, args.height);

  // Construct only the images that were requested on the command line.
  unique_ptr<Image> image, depth_image, normals_image;
  if (!args.output_file.empty()) {
    image.reset(new Image(image_pixels, ImageFormat::RGBA_Vec4f));
    image->clear(Vec4f());
  }
  if (!args.depth_file.empty()) {
    depth_image.reset(new Image(image_pixels, ImageFormat::RGBA_Vec4f));
    depth_image->clear(Vec4f());
  }
  if (!args.normals_file.empty()) {
    normals_image.reset(new Image(image_pixels, ImageFormat::RGBA_Vec4f));
    normals_image->clear(Vec4f());
  }

  // EXTRA: Filter/Film would enable smarter reconstruction filters; the
  // required box filtering just averages the per-pixel samples below.

  // Progress counter shared across the (optionally parallel) scanline loop.
  atomic<int> lines_done = 0;

  // Main render loop: for each scanline, for each pixel, trace all samples
  // and accumulate color/depth/normal, then average.
#pragma omp parallel for  // Enable OpenMP in the project for parallel rendering.
  for (int j = 0; j < args.height; ++j) {
    if (args.show_progress)
      ::printf("%.2f%% \r", lines_done * 100.0f / image_pixels.y);

    // One sampler per scanline keeps the parallel loop race-free.
    auto sampler = unique_ptr<Sampler>(
        Sampler::constructSampler(args.sampling_pattern, args.num_samples));

    for (int i = 0; i < args.width; ++i) {
      Vec3f sample_color = Vec3f(0.0f);
      Vec3f depth_color = Vec3f(0.0f);
      Vec3f normal_color = Vec3f(0.0f);

      for (int n = 0; n < args.num_samples; ++n) {
        // Sub-pixel offset of this sample, then map to canonical [-1,1]^2.
        Vec2f offset = sampler->getSamplePosition(n);
        Vec2f ray_xy = Camera::normalizedImageCoordinateFromPixelCoordinate(
            Vec2f(float(i), float(j)) + offset, image_pixels);
        Ray r = scene.getCamera()->generateRay(ray_xy);

        Hit hit;
        float tmin = scene.getCamera()->getTMin();
        // args.bounces bounds the reflection/refraction recursion depth.
        sample_color = sample_color +
            ray_tracer.traceRay(r, tmin, args.bounces, 1.0f, hit, Vec3f(1.0f));

        if (depth_image) {
          // Map t in [depth_min, depth_max] to [1,0]: closer is brighter.
          float x = hit.t;
          float f = 1 - (x - args.depth_min) / (args.depth_max - args.depth_min);
          f = FW::max(f, 0.0f);
          f = FW::min(f, 1.0f);
          depth_color = depth_color + f;
        }
        if (normals_image) {
          // FIX: renamed from `n`, which shadowed the sample loop counter.
          Vec3f nrm = hit.normal;
          Vec3f color(fabs(nrm[0]), fabs(nrm[1]), fabs(nrm[2]));
          color = color.clamp(Vec3f(0), Vec3f(1));
          normal_color = normal_color + color;
        }
      }

      // Box filter: average all samples of this pixel.
      sample_color = sample_color / (1.0 * args.num_samples);
      depth_color = depth_color / (1.0 * args.num_samples);
      normal_color = normal_color / (1.0 * args.num_samples);

      // FIX: the color write was unconditional and dereferenced a null
      // image whenever -output was not requested.
      if (image)
        image->setVec4f(Vec2i(i, j), Vec4f(sample_color, 1));
      if (depth_image)
        depth_image->setVec4f(Vec2i(i, j), Vec4f(Vec3f(depth_color), 1));
      if (normals_image)
        normals_image->setVec4f(Vec2i(i, j), Vec4f(normal_color, 1));
    }
    ++lines_done;
  }

  // EXTRA: with weighted reconstruction filtering, normalize by the weight
  // carried in the 4th channel here.
  if (image) {
  }

  // Save the requested images as PNG.
  if (image) {
    FW::File f(args.output_file.c_str(), FW::File::Create);
    exportLodePngImage(f, image.get());
  }
  if (depth_image) {
    FW::File f(args.depth_file.c_str(), FW::File::Create);
    exportLodePngImage(f, depth_image.get());
  }
  if (normals_image) {
    FW::File f(args.normals_file.c_str(), FW::File::Create);
    exportLodePngImage(f, normals_image.get());
  }

  // FIX: previously dereferenced image unconditionally here as well.
  return image ? image->createGLTexture() : 0;
}