// Renders sText into a 1-bit monochrome image whose height is exactly
// `dimension` pixels, scaling the font so its line height matches, and
// assigns the result to `image`.
//
// Parameters:
//   image     - output: receives the rendered monochrome image
//   sText     - text to render
//   font      - base font; its point size is rescaled to fit `dimension`
//   dimension - target image height in pixels (must be non-zero)
void Create2DImage(QImage& image, const QString& sText, const QFont& font, unsigned short dimension)
{
    if (dimension == 0)
    {
        // Guard: the original divided by `dimension`, so 0 produced an
        // infinite coefficient and an invalid (0) font point size.
        image = QImage();
        return;
    }

    // Scale the font so the scaled font's pixel height equals `dimension`.
    const qreal coeff = static_cast<qreal>(QFontMetrics(font).height())
                      / static_cast<qreal>(dimension);
    QFont fontScaled(font);
    fontScaled.setPointSizeF(font.pointSizeF() / coeff);

    // Width: bounding rect of the text plus a small safety margin (3 px).
    const QFontMetrics scaledMetrics(fontScaled);
    const int width = 3 + scaledMetrics.boundingRect(sText).width();

    QImage imageOutput(width, dimension, QImage::Format_Mono);
    imageOutput.fill(1u);   // colour-index 1 of the default mono palette (background)

    {
        // Scoped so the painter releases the image before the assignment below.
        QPainter painter(&imageOutput);
        painter.setFont(fontScaled);
        painter.setPen(QColor(Qt::black));
        const QRect rectDraw(QPoint(0, 0), QSize(width, dimension));
        painter.drawText(rectDraw, Qt::AlignLeft | Qt::AlignTop, sText);
    }
    image = imageOutput;
}
// Runs the retina's fast tone-mapping pass on the input image and writes the
// tone-mapped result to outputToneMappedImage. Colour inputs go through the
// RGB tone-mapping path, grayscale inputs through the gray path; both use the
// photoreceptor and ganglion-cell sensitivities from _retinaParameters.
void RetinaImpl::applyFastToneMapping(InputArray inputImage, OutputArray outputToneMappedImage)
{
    // Convert the input into the retina's internal buffer format; the
    // converter reports whether the source carried colour information.
    const bool isColor = _convertCvMat2ValarrayBuffer(inputImage.getMat(), _inputBuffer);

    const unsigned int rows = _retinaFilter->getOutputNBrows();
    const unsigned int cols = _retinaFilter->getOutputNBcolumns();
    const unsigned int pixelCount = rows * cols;

    if (isColor)
    {
        // Three interleaved channels for the RGB tone-mapping path.
        std::valarray<float> mapped(pixelCount * 3);
        _retinaFilter->runRGBToneMapping(_inputBuffer, mapped, true,
                                         _retinaParameters.OPLandIplParvo.photoreceptorsLocalAdaptationSensitivity,
                                         _retinaParameters.OPLandIplParvo.ganglionCellsSensitivity);
        _convertValarrayBuffer2cvMat(mapped, rows, cols, true, outputToneMappedImage);
    }
    else
    {
        // Single-channel gray tone mapping.
        std::valarray<float> mapped(pixelCount);
        _retinaFilter->runGrayToneMapping(_inputBuffer, mapped,
                                          _retinaParameters.OPLandIplParvo.photoreceptorsLocalAdaptationSensitivity,
                                          _retinaParameters.OPLandIplParvo.ganglionCellsSensitivity);
        _convertValarrayBuffer2cvMat(mapped, rows, cols, false, outputToneMappedImage);
    }
}
int main(int argc, char *argv[]) { // Main function for the raytracer. Parses input parameters, // sets up the initial blank image, and calls the functions // that set up the scene and do the raytracing. struct image *im; // Will hold the raytraced image struct view *cam; // Camera and view for this scene int sx; // Size of the raytraced image int antialiasing; // Flag to determine whether antialiaing is enabled or disabled char output_name[1024]; // Name of the output file for the raytraced .ppm image struct point3D e; // Camera view parameters 'e', 'g', and 'up' struct point3D g; struct point3D up; double du, dv; // Increase along u and v directions for pixel coordinates struct point3D pc,d; // Point structures to keep the coordinates of a pixel and // the direction or a ray struct ray3D *ray; // Structure to keep the ray from e to a pixel // struct colourRGB col; // Return colour for raytraced pixels struct colourRGB background; // Background colour int i,j; // Counters for pixel coordinates unsigned char *rgbIm; if (argc<5) { fprintf(stderr,"RayTracer: Can not parse input parameters\n"); fprintf(stderr,"USAGE: RayTracer size rec_depth antialias output_name\n"); fprintf(stderr," size = Image size (both along x and y)\n"); fprintf(stderr," rec_depth = Recursion depth\n"); fprintf(stderr," antialias = A single digit, 0 disables antialiasing. Anything else enables antialiasing\n"); fprintf(stderr," output_name = Name of the output file, e.g. 
MyRender.ppm\n"); exit(0); } sx=atoi(argv[1]); MAX_DEPTH=atoi(argv[2]); if (atoi(argv[3])==0) antialiasing=0; else antialiasing=1; strcpy(&output_name[0],argv[4]); fprintf(stderr,"Rendering image at %d x %d\n",sx,sx); fprintf(stderr,"Recursion depth = %d\n",MAX_DEPTH); if (!antialiasing) fprintf(stderr,"Antialising is off\n"); else fprintf(stderr,"Antialising is on\n"); fprintf(stderr,"Output file name: %s\n",output_name); object_list=NULL; light_list=NULL; texture_list=NULL; // Allocate memory for the new image im=newImage(sx, sx); if (!im) { fprintf(stderr,"Unable to allocate memory for raytraced image\n"); exit(0); } else rgbIm=(unsigned char *)im->rgbdata; /////////////////////////////////////////////////// // TO DO: You will need to implement several of the // functions below. For Assignment 3, you can use // the simple scene already provided. But // for Assignment 4 you need to create your own // *interesting* scene. /////////////////////////////////////////////////// buildScene(); // Create a scene. This defines all the // objects in the world of the raytracer ////////////////////////////////////////// // TO DO: For Assignment 3 you can use the setup // already provided here. For Assignment 4 // you may want to move the camera // and change the view parameters // to suit your scene. ////////////////////////////////////////// // Mind the homogeneous coordinate w of all vectors below. DO NOT // forget to set it to 1, or you'll get junk out of the // geometric transformations later on. // Camera center is at (0,0,-1) e.px=0; e.py=0; e.pz=-1; e.pw=1; // To define the gaze vector, we choose a point 'pc' in the scene that // the camera is looking at, and do the vector subtraction pc-e. // Here we set up the camera to be looking at the origin. 
g.px=0-e.px; g.py=0-e.py; g.pz=0-e.pz; g.pw=1; // In this case, the camera is looking along the world Z axis, so // vector w should end up being [0, 0, -1] // Define the 'up' vector to be the Y axis up.px=0; up.py=1; up.pz=0; up.pw=1; // Set up view with given the above vectors, a 4x4 window, // and a focal length of -1 (why? where is the image plane?) // Note that the top-left corner of the window is at (-2, 2) // in camera coordinates. cam=setupView(&e, &g, &up, -1, -2, 2, 4); if (cam==NULL) { fprintf(stderr,"Unable to set up the view and camera parameters. Our of memory!\n"); cleanup(object_list,light_list, texture_list); deleteImage(im); exit(0); } // Set up background colour here background.R=0; background.G=0; background.B=0; // Do the raytracing ////////////////////////////////////////////////////// // TO DO: You will need code here to do the raytracing // for each pixel in the image. Refer to the // lecture notes, in particular, to the // raytracing pseudocode, for details on what // to do here. Make sure you undersand the // overall procedure of raytracing for a single // pixel. ////////////////////////////////////////////////////// du=cam->wsize/(sx-1); // du and dv. In the notes in terms of wl and wr, wt and wb, dv=-cam->wsize/(sx-1); // here we use wl, wt, and wsize. du=dv since the image is // and dv is negative since y increases downward in pixel // coordinates and upward in camera coordinates. 
colourRGB col; point3D origin; point3D direction; ray3D initialRay; colourRGB total; int offset; int aaSamples; fprintf(stderr,"View parameters:\n"); fprintf(stderr,"Left=%f, Top=%f, Width=%f, f=%f\n",cam->wl,cam->wt,cam->wsize,cam->f); fprintf(stderr,"Camera to world conversion matrix (make sure it makes sense!):\n"); printmatrix(cam->C2W); fprintf(stderr,"World to camera conversion matrix:\n"); printmatrix(cam->W2C); fprintf(stderr,"\n"); fprintf(stderr,"Rendering row: "); #pragma omp parallel for schedule(dynamic,32) shared(rgbIm, object_list, light_list, texture_list) private(j) for (j=0;j<sx;j++) // For each of the pixels in the image // for (j=2;j<3;j++) { fprintf(stderr,"%d/%d, ",j,sx); #pragma omp parallel for private(origin, direction, col, initialRay, i, aaSamples, offset, total) for (i=0;i<sx;i++) // for (i=2;i<3;i++) { if (!antialiasing){ col.R = 0; col.G = 0; col.B = 0; // = newPoint(cam->wl+i*du,cam->wt+j*dv,cam->f); origin.px = cam->wl+i*du; origin.py = cam->wt+j*dv; origin.pz = cam->f; origin.pw = 1.0; matVecMult(cam->C2W, &origin); // Construct direction vector using Pij - e // point3D direction;// = newPoint(origin->px,origin->py, origin->pz); direction.px = origin.px; direction.py = origin.py; direction.pz = origin.pz; direction.pw = 1.0; subVectors(&e, &direction); normalize(&direction); // Construct ray using both origin and direction. // ray3D initialRay;// = newRay(origin, direction); initialRay.p0 = origin; initialRay.d = direction; // Setting up colors. // col = (struct colourRGB *)calloc(1,sizeof(struct colourRGB)); // Tracing ray rayTrace(&initialRay, 1, &col, NULL); offset = (sx * j * 3) + (i * 3); *(rgbIm + offset + 0) = col.R*255; *(rgbIm + offset + 1) = col.G*255; *(rgbIm + offset + 2) = col.B*255; // Tear down col struct. 
// free(col); } else { total.R = 0; total.G = 0; total.B = 0; for (aaSamples = 0; aaSamples < 20; aaSamples ++){ col.R = 0; col.G = 0; col.B = 0; // point3D origin;// = newPoint(cam->wl+i*du,cam->wt+j*dv,cam->f); origin.px = cam->wl+(i+drand48()-0.5)*du; origin.py = cam->wt+(j+drand48()-0.5)*dv; origin.pz = cam->f; origin.pw = 1.0; matVecMult(cam->C2W, &origin); // Construct direction vector using Pij - e // point3D direction;// = newPoint(origin->px,origin->py, origin->pz); direction.px = origin.px; direction.py = origin.py; direction.pz = origin.pz; direction.pw = 1.0; subVectors(&e, &direction); normalize(&direction); // Construct ray using both origin and direction. // ray3D initialRay;// = newRay(origin, direction); initialRay.p0 = origin; initialRay.d = direction; // Setting up colors. // col = (struct colourRGB *)calloc(1,sizeof(struct colourRGB)); // Tracing ray rayTrace(&initialRay, 1, &col, NULL); total.R += col.R; total.G += col.G; total.B += col.B; } offset = (sx * j * 3) + (i * 3); total.R = total.R / 20 * 255.0; total.G = total.G / 20 * 255.0; total.B = total.B / 20 * 255.0; *(rgbIm + offset + 0) = total.R; *(rgbIm + offset + 1) = total.G; *(rgbIm + offset + 2) = total.B; } } // end for i } // end for j fprintf(stderr,"\nDone!\n"); // Output rendered image imageOutput(im,output_name); // Exit section. Clean up and return. cleanup(object_list,light_list,texture_list); // Object, light, and texture lists deleteImage(im); // Rendered image free(cam); // camera view exit(0); }
int main(int argc, char *argv[]) { /* Main routine: - Load the test image specified in the command line - Run both the slow and fast image scaling routines for a few seconds - Compute FPS for both - Save output images to disk - Print out FPS ratio of fast routine to slow routine (should be > 1!) */ unsigned char *src; // Used to store the source image unsigned char *dst; // Will be used to hold the rescaled image int sx, sy; // Resolution of the source image (sx * sy pixels) time_t t0, t1, t2, t3; int c_a,c_b; double FPS_a; double FPS_b; if (argc!=2) { fprintf(stderr,"Usage: Image_Rescale src_name\n"); fprintf(stderr," src_name is the name of the test image (must be in .ppm format)\n"); exit(1); } src=readPPMimage(argv[1], &sx, &sy); if (!src) { fprintf(stderr,"Unable to open test image\n"); exit(1); } fprintf(stderr,"Starting tests...\n"); // Time plain slow routine t1=t0=time(NULL); c_a=0; while(difftime(t1,t0)<3.0) { dst=slow_rescale(src,sx,sy,HD_Xres,HD_Yres); if (dst) {c_a++; free(dst);} else break; t1=time(NULL); } if (c_a>0) { FPS_a=c_a/(double)(t1-t0); fprintf(stderr,"slow image rescaling FPS=%f\n",FPS_a); } else { fprintf(stderr,"Something went wrong!\n"); } // Time your fast routine t3=t2=time(NULL); c_b=0; while(difftime(t3,t2)<3.0) { dst=fast_rescale(src,sx,sy,HD_Xres,HD_Yres); if (dst) {c_b++; free(dst);} else break; t3=time(NULL); } if (c_b>0) { FPS_b=c_b/(double)(t3-t2); fprintf(stderr,"Fast image rescaling FPS=%f\n",FPS_b); fprintf(stderr,"Ratio: %f\n",FPS_b/FPS_a); } else { fprintf(stderr,"Fast routine not implemented\n"); } // Output rescaled images for inspection dst=slow_rescale(src,sx,sy,HD_Xres,HD_Yres); if (dst) {imageOutput(dst,HD_Xres,HD_Yres,"vanilla_rescaled.ppm"); free(dst);} dst=fast_rescale(src,sx,sy,HD_Xres,HD_Yres); if (dst) {imageOutput(dst,HD_Xres,HD_Yres,"fast_rescaled.ppm"); free(dst);} fprintf(stderr,"Done!\n"); free(src); exit(0); }
// Forwards the supplied image by emitting the plugin's imageOutput signal,
// so any connected slots receive it.
void WeatherPlugin::outputImage (const QImage &image) { emit imageOutput (image); }