Example #1
void doubleReflectionAlgorithm(Snap* prev,Snap* cur)
{
    double v1[3];
    double v2[3];
    double c1;
    double c2;
    double rl[3];
    double tl[3];

    subVectors(cur->position,prev->position,v1);
    c1 = dotProduct(v1,v1);
    // rl: prev->up reflected across the plane that bisects the chord v1
    multVector((-2.0/c1)*dotProduct(v1,prev->up),v1,rl);
    addVectors(prev->up,rl,rl);
    // tl: prev->direction reflected across the same plane
    multVector((-2.0/c1)*dotProduct(v1,prev->direction),v1,tl);
    addVectors(prev->direction,tl,tl);
    // v2: difference between the current tangent and the reflected one
    subVectors(cur->direction,tl,v2);
    c2 = dotProduct(v2,v2);
    // new up: rl reflected across the plane that bisects v2
    multVector((-2.0/c2)*dotProduct(v2,rl),v2,cur->up);
    addVectors(rl,cur->up,cur->up);
    crossProduct(cur->direction,cur->up,cur->cross);
}
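This is one step of the double reflection method for propagating a rotation-minimizing frame along a sampled curve: the previous frame's up and tangent are reflected across the plane bisecting the chord between samples, and the reflected up is reflected once more so it pairs with the current tangent. Below is a minimal self-contained C++ sketch of the same step; Vec3, Frame, and the helper names are illustrative stand-ins for the project's Snap type and vector helpers, and the sketch omits the cross product the original additionally caches.

#include <array>

using Vec3 = std::array<double, 3>;

static double dot(const Vec3& a, const Vec3& b) {
    return a[0]*b[0] + a[1]*b[1] + a[2]*b[2];
}
static Vec3 sub(const Vec3& a, const Vec3& b) {
    return {a[0]-b[0], a[1]-b[1], a[2]-b[2]};
}
static Vec3 axpy(double s, const Vec3& x, const Vec3& y) {  // s*x + y
    return {s*x[0]+y[0], s*x[1]+y[1], s*x[2]+y[2]};
}

struct Frame { Vec3 position, direction, up; };

// One step of the double reflection method: reflect the previous frame's
// up and direction across the chord, then reflect the up vector again so
// it pairs with the current tangent.
Frame doubleReflectionStep(const Frame& prev, const Vec3& position, const Vec3& direction) {
    Vec3 v1 = sub(position, prev.position);
    double c1 = dot(v1, v1);
    Vec3 rl = axpy((-2.0 / c1) * dot(v1, prev.up), v1, prev.up);
    Vec3 tl = axpy((-2.0 / c1) * dot(v1, prev.direction), v1, prev.direction);
    Vec3 v2 = sub(direction, tl);
    double c2 = dot(v2, v2);
    Vec3 up = axpy((-2.0 / c2) * dot(v2, rl), v2, rl);
    return {position, direction, up};
}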
Example #2
File: vector2.cpp  Project: elf11/3D-Maze
Vector getVectorsNormale3(PVector Vector1, PVector Vector2, PVector Vector3) {
    Vector V1, V2, V3, MyVector;
    
    V1 = subVectors(Vector1, Vector2);      // first edge
    V2 = subVectors(Vector2, Vector3);      // second edge
    V3 = getVectorsNormale2(&V1, &V2);      // cross product of the two edges
    MyVector = getNormalizedVector(&V3);    // normalize to unit length
    
    return (MyVector);
}
Example #3
Plane& Plane::setFromCoplanarPoints( const Vector3& a, const Vector3& b, const Vector3& c) {

  auto v1 = Vector3();
  auto v2 = Vector3();

  auto normal = v1.subVectors( c, b ).cross( v2.subVectors( a, b ) ).normalize();

  // Q: should an error be thrown if normal is zero (e.g. degenerate plane)?
  setFromNormalAndCoplanarPoint( normal, a );

  return *this;

}
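The normal here is normalize((c - b) × (a - b)), after which the plane is anchored at point a. A minimal sketch of the same construction, assuming plain structs in place of the library's Vector3 and Plane:

#include <cmath>

struct Vec3 { double x, y, z; };

static Vec3 sub(const Vec3& a, const Vec3& b) { return {a.x-b.x, a.y-b.y, a.z-b.z}; }
static Vec3 cross(const Vec3& a, const Vec3& b) {
    return {a.y*b.z - a.z*b.y, a.z*b.x - a.x*b.z, a.x*b.y - a.y*b.x};
}
static double dot(const Vec3& a, const Vec3& b) { return a.x*b.x + a.y*b.y + a.z*b.z; }

struct Plane { Vec3 normal; double constant; };

// Plane through a, b, c; degenerate (collinear) inputs yield a zero normal.
Plane planeFromPoints(const Vec3& a, const Vec3& b, const Vec3& c) {
    Vec3 n = cross(sub(c, b), sub(a, b));
    double len = std::sqrt(dot(n, n));
    if (len > 0.0) n = {n.x/len, n.y/len, n.z/len};
    return {n, -dot(n, a)};  // plane equation: dot(normal, p) + constant == 0
}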
Example #4
File: jacobi.c  Project: Egor-mn/NSU
double getResidualNorm(int id, matrix a, vector x, vector y, double c, int size) {
    vector v1, v2;
    multiplyMatrixVector(a, x, size, v1);   // v1 = A * x
    multiplyVectorConst(y, c, size);        // scales y in place: y = c * y
    subVectors(v1, y, size, v2);            // v2 = A*x - c*y
    return getVectorNorm(id, v2, size);
}
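The quantity computed above is the norm of the residual r = A·x - c·y, a standard convergence measure for Jacobi iteration (note that multiplyVectorConst scales y in place). A compact sketch of the same computation, assuming a row-major flat-array matrix and the 2-norm; both are assumptions for illustration, since the real matrix/vector types and the norm selected by id are not shown:

#include <cmath>

// Sketch: 2-norm of A*x - c*y for an n-by-n row-major matrix.
double residualNorm2(const double* A, const double* x, const double* y,
                     double c, int n) {
    double sum = 0.0;
    for (int i = 0; i < n; ++i) {
        double ri = -c * y[i];                        // start from -c*y[i]
        for (int j = 0; j < n; ++j) ri += A[i * n + j] * x[j];  // add (A*x)[i]
        sum += ri * ri;
    }
    return std::sqrt(sum);
}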
Example #5
void OriginalSpaceKmeans::changeAssignment(int xIndex, int closestCluster, int threadId) {
    unsigned short oldAssignment = assignment[xIndex];
    Kmeans::changeAssignment(xIndex, closestCluster, threadId);
    double *xp = x->data + xIndex * d;
    subVectors(sumNewCenters[threadId]->data +  oldAssignment * d, xp, d);
    addVectors(sumNewCenters[threadId]->data + closestCluster * d, xp, d);
}
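The reassignment above maintains per-thread running sums of the points in each cluster: the point's coordinates are subtracted from the old cluster's accumulator and added to the new one, so fresh centers can later be computed as sum divided by count. A minimal sketch of that update on raw d-dimensional arrays, with hypothetical names:

// Sketch of the running-sum update behind the reassignment above;
// sumOld/sumNew are the d-dimensional accumulators for the two clusters.
void moveBetweenSums(double* sumOld, double* sumNew, const double* x, int d) {
    for (int k = 0; k < d; ++k) {
        sumOld[k] -= x[k];  // remove the point from its old cluster's sum
        sumNew[k] += x[k];  // add it to its new cluster's sum
    }
}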
Example #6
void applyingLeftHouseHolder(Vector* w, Vector** A_n, int ncols){
  int j;
  Vector *aux;
  float lambda, wt_a_2;

  lambda = dotProduct(w,w);
  aux = createVector(A_n[0]->len);
  for(j = 0; j < ncols; j++){
    cpyVectors(w, aux);
    wt_a_2 = dotProduct(w, A_n[j]) * 2;
    if(lambda != 0.0) multByScalar((wt_a_2 / lambda), aux);
    subVectors(A_n[j], aux, A_n[j]);
  }
  free(aux);
}
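The loop above applies the Householder reflector H = I - 2·w·wᵀ/(wᵀw) to every column of A in place, which is the core update of QR factorization by Householder reflections; the lambda check skips the scaling when w is the zero vector, since H is then the identity. A minimal sketch of the same update on std::vector columns, with illustrative types in place of the project's Vector API:

#include <vector>

static double dotd(const std::vector<double>& a, const std::vector<double>& b) {
    double s = 0.0;
    for (size_t i = 0; i < a.size(); ++i) s += a[i] * b[i];
    return s;
}

// Apply H = I - 2*w*w^T / (w^T w) to every column of A in place.
void applyLeftHouseholder(const std::vector<double>& w,
                          std::vector<std::vector<double>>& A) {
    double lambda = dotd(w, w);
    if (lambda == 0.0) return;          // H is the identity when w == 0
    for (auto& col : A) {
        double scale = 2.0 * dotd(w, col) / lambda;
        for (size_t i = 0; i < col.size(); ++i)
            col[i] -= scale * w[i];     // col <- col - (2 w^T col / w^T w) w
    }
}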
Example #7
int main(int argc, char *argv[])
{
  // Main function for the raytracer. Parses input parameters,
  // sets up the initial blank image, and calls the functions
  // that set up the scene and do the raytracing.
  struct image *im; // Will hold the raytraced image
  struct view *cam; // Camera and view for this scene
  int sx;   // Size of the raytraced image
  int antialiasing; // Flag to determine whether antialiasing is enabled or disabled
  char output_name[1024]; // Name of the output file for the raytraced .ppm image
  struct point3D e;   // Camera view parameters 'e', 'g', and 'up'
  struct point3D g;
  struct point3D up;
  double du, dv;      // Increase along u and v directions for pixel coordinates
  struct point3D pc,d;    // Point structures to keep the coordinates of a pixel and
        // the direction of a ray
  struct ray3D *ray;    // Structure to keep the ray from e to a pixel
  // struct colourRGB col;    // Return colour for raytraced pixels
  struct colourRGB background;   // Background colour
  int i,j;      // Counters for pixel coordinates
  unsigned char *rgbIm;

  if (argc<5)
  {
    fprintf(stderr,"RayTracer: Can not parse input parameters\n");
    fprintf(stderr,"USAGE: RayTracer size rec_depth antialias output_name\n");
    fprintf(stderr,"   size = Image size (both along x and y)\n");
    fprintf(stderr,"   rec_depth = Recursion depth\n");
    fprintf(stderr,"   antialias = A single digit, 0 disables antialiasing. Anything else enables antialiasing\n");
    fprintf(stderr,"   output_name = Name of the output file, e.g. MyRender.ppm\n");
    exit(0);
  }
  sx=atoi(argv[1]);
  MAX_DEPTH=atoi(argv[2]);
  if (atoi(argv[3])==0) antialiasing=0; else antialiasing=1;
  strcpy(&output_name[0],argv[4]);

  fprintf(stderr,"Rendering image at %d x %d\n",sx,sx);
  fprintf(stderr,"Recursion depth = %d\n",MAX_DEPTH);
  if (!antialiasing) fprintf(stderr,"Antialiasing is off\n");
  else  fprintf(stderr,"Antialiasing is on\n");
  fprintf(stderr,"Output file name: %s\n",output_name);

  object_list=NULL;
  light_list=NULL;
  texture_list=NULL;

  // Allocate memory for the new image
  im=newImage(sx, sx);
  if (!im)
  {
    fprintf(stderr,"Unable to allocate memory for raytraced image\n");
    exit(0);
  }
  else rgbIm=(unsigned char *)im->rgbdata;

  ///////////////////////////////////////////////////
  // TO DO: You will need to implement several of the
  //        functions below. For Assignment 3, you can use
  //        the simple scene already provided. But
  //        for Assignment 4 you need to create your own
  //        *interesting* scene.
  ///////////////////////////////////////////////////
  buildScene();   // Create a scene. This defines all the
      // objects in the world of the raytracer

  //////////////////////////////////////////
  // TO DO: For Assignment 3 you can use the setup
  //        already provided here. For Assignment 4
  //        you may want to move the camera
  //        and change the view parameters
  //        to suit your scene.
  //////////////////////////////////////////

  // Mind the homogeneous coordinate w of all vectors below. DO NOT
  // forget to set it to 1, or you'll get junk out of the
  // geometric transformations later on.

  // Camera center is at (0,0,-1)
  e.px=0;
  e.py=0;
  e.pz=-1;
  e.pw=1;

  // To define the gaze vector, we choose a point 'pc' in the scene that
  // the camera is looking at, and do the vector subtraction pc-e.
  // Here we set up the camera to be looking at the origin.
  g.px=0-e.px;
  g.py=0-e.py;
  g.pz=0-e.pz;
  g.pw=1;
  // In this case, the camera is looking along the world Z axis, so
  // vector w should end up being [0, 0, -1]

  // Define the 'up' vector to be the Y axis
  up.px=0;
  up.py=1;
  up.pz=0;
  up.pw=1;

  // Set up view with given the above vectors, a 4x4 window,
  // and a focal length of -1 (why? where is the image plane?)
  // Note that the top-left corner of the window is at (-2, 2)
  // in camera coordinates.
  cam=setupView(&e, &g, &up, -1, -2, 2, 4);

  if (cam==NULL)
  {
    fprintf(stderr,"Unable to set up the view and camera parameters. Our of memory!\n");
    cleanup(object_list,light_list, texture_list);
    deleteImage(im);
    exit(0);
  }

  // Set up background colour here
  background.R=0;
  background.G=0;
  background.B=0;

  // Do the raytracing
  //////////////////////////////////////////////////////
  // TO DO: You will need code here to do the raytracing
  //        for each pixel in the image. Refer to the
  //        lecture notes, in particular, to the
  //        raytracing pseudocode, for details on what
  //        to do here. Make sure you understand the
  //        overall procedure of raytracing for a single
  //        pixel.
  //////////////////////////////////////////////////////
  du=cam->wsize/(sx-1);   // du and dv. In the notes these are defined in terms of wl, wr,
  dv=-cam->wsize/(sx-1);  // wt, and wb; here we use wl, wt, and wsize. |du|=|dv| since the
                          // image is square, and dv is negative since y increases downward
                          // in pixel coordinates and upward in camera coordinates.
  colourRGB col;
  point3D origin;
  point3D direction;
  ray3D initialRay;
  colourRGB total;
  int offset;
  int aaSamples;
  fprintf(stderr,"View parameters:\n");
  fprintf(stderr,"Left=%f, Top=%f, Width=%f, f=%f\n",cam->wl,cam->wt,cam->wsize,cam->f);
  fprintf(stderr,"Camera to world conversion matrix (make sure it makes sense!):\n");
  printmatrix(cam->C2W);
  fprintf(stderr,"World to camera conversion matrix:\n");
  printmatrix(cam->W2C);
  fprintf(stderr,"\n");
  fprintf(stderr,"Rendering row: ");
  #pragma omp parallel for schedule(dynamic,32) shared(rgbIm, object_list, light_list, texture_list) private(j)
  for (j=0;j<sx;j++)    // For each of the pixels in the image
  // for (j=2;j<3;j++)
  {
    fprintf(stderr,"%d/%d, ",j,sx);
    #pragma omp parallel for private(origin, direction, col, initialRay, i, aaSamples, offset, total)
    for (i=0;i<sx;i++)
    // for (i=2;i<3;i++)
    {
      if (!antialiasing){
        col.R = 0;
        col.G = 0;
        col.B = 0;
        // = newPoint(cam->wl+i*du,cam->wt+j*dv,cam->f);
        origin.px = cam->wl+i*du;
        origin.py = cam->wt+j*dv;
        origin.pz = cam->f;
        origin.pw = 1.0;
        matVecMult(cam->C2W, &origin);
        // Construct direction vector using Pij - e
        // point3D direction;// = newPoint(origin->px,origin->py, origin->pz);
        direction.px = origin.px;
        direction.py = origin.py;
        direction.pz = origin.pz;
        direction.pw = 1.0;
        subVectors(&e, &direction);
        normalize(&direction);
        // Construct ray using both origin and direction.
        // ray3D initialRay;// = newRay(origin, direction);
        initialRay.p0 = origin;
        initialRay.d = direction;
        // Setting up colors.
        // col = (struct colourRGB *)calloc(1,sizeof(struct colourRGB));

        // Tracing ray
        rayTrace(&initialRay, 1, &col, NULL);
        offset = (sx * j * 3) + (i * 3);
        *(rgbIm + offset + 0) = col.R*255;
        *(rgbIm + offset + 1) = col.G*255;
        *(rgbIm + offset + 2) = col.B*255;
        // Tear down col struct.
        // free(col);
      } else {
        total.R = 0;
        total.G = 0;
        total.B = 0;
        for (aaSamples = 0; aaSamples < 20; aaSamples ++){
          col.R = 0;
          col.G = 0;
          col.B = 0;
          // point3D origin;// = newPoint(cam->wl+i*du,cam->wt+j*dv,cam->f);
          origin.px = cam->wl+(i+drand48()-0.5)*du;
          origin.py = cam->wt+(j+drand48()-0.5)*dv;
          origin.pz = cam->f;
          origin.pw = 1.0;
          matVecMult(cam->C2W, &origin);
          // Construct direction vector using Pij - e
          // point3D direction;// = newPoint(origin->px,origin->py, origin->pz);
          direction.px = origin.px;
          direction.py = origin.py;
          direction.pz = origin.pz;
          direction.pw = 1.0;
          subVectors(&e, &direction);
          normalize(&direction);
          // Construct ray using both origin and direction.
          // ray3D initialRay;// = newRay(origin, direction);
          initialRay.p0 = origin;
          initialRay.d = direction;
          // Setting up colors.
          // col = (struct colourRGB *)calloc(1,sizeof(struct colourRGB));
          // Tracing ray
          rayTrace(&initialRay, 1, &col, NULL);
          total.R += col.R;
          total.G += col.G;
          total.B += col.B;
        }
        offset = (sx * j * 3) + (i * 3);
        total.R = total.R / 20 * 255.0;
        total.G = total.G / 20 * 255.0;
        total.B = total.B / 20 * 255.0;
        *(rgbIm + offset + 0) = total.R;
        *(rgbIm + offset + 1) = total.G;
        *(rgbIm + offset + 2) = total.B;
      }
    } // end for i
  } // end for j
  
  fprintf(stderr,"\nDone!\n");

  // Output rendered image
  imageOutput(im,output_name);

  // Exit section. Clean up and return.
  cleanup(object_list,light_list,texture_list);   // Object, light, and texture lists
  deleteImage(im);          // Rendered image
  free(cam);            // camera view
  exit(0);
}
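The per-pixel setup inside the loops above maps pixel (i, j) to the point (wl + i·du, wt + j·dv, f) on the image plane in camera coordinates, converts it to world coordinates with C2W, and shoots a ray from the eye e through it. A hedged sketch of just the pixel-to-image-plane mapping, with simplified stand-in types; the real code also applies the camera-to-world matrix before forming the ray direction:

struct Vec3 { double x, y, z; };

struct Camera {
    double wl, wt, wsize, f;  // window left, top, width, focal distance
    int sx;                   // image resolution (sx by sx)
};

// Center of pixel (i, j) on the image plane, in camera coordinates.
Vec3 pixelOnImagePlane(const Camera& cam, int i, int j) {
    double du = cam.wsize / (cam.sx - 1);   // step right per pixel column
    double dv = -cam.wsize / (cam.sx - 1);  // step down per pixel row (y flips)
    return {cam.wl + i * du, cam.wt + j * dv, cam.f};
}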
Example #8
void rtShade(struct object3D *obj, struct point3D *p, struct point3D *n, struct ray3D *ray, int depth, double a, double b, struct colourRGB *col)
{
  // This function implements the shading model as described in lecture. It takes
  // - A pointer to the first object intersected by the ray (to get the colour properties)
  // - The coordinates of the intersection point (in world coordinates)
  // - The normal at the point
  // - The ray (needed to determine the reflection direction to use for the global component, as well as for
  //   the Phong specular component)
  // - The current recursion depth
  // - The (a,b) texture coordinates (meaningless unless texture is enabled)
  //
  // Returns:
  // - The colour for this ray (using the col pointer)
  //
 
  // struct colourRGB tmp_col;     // Accumulator for colour components
  // Apply Ambient Light.
  // Ambient = r_a * I_a * 255
  //         = Albedo_ra * objectColor

  double R,G,B;
  double refractHit = 0;
  double reflectHit = 0;
  double numLights = 0;
  point3D nAdj = *n;
  // printf("RT SHADE CALLED.\n");
  if (obj->texImg==NULL) {
    R = obj->col.R;
    G = obj->col.G;
    B = obj->col.B;
    // printf("PICTURE UNDETECTED. USING R G B = [%f, %f, %f]\n", R,G,B);
  } else {
    // printf("Getting image coordinates @ [%f, %f]\n", a, b);
    // Get object colour from the texture given the texture coordinates (a,b), and the texturing function
    // for the object. Note that we will use textures also for Photon Mapping.
    // printf("CHECKING IMAGE COORDTINATES [%f, %f]\n", a, b);
    obj->textureMap(obj->texImg,a,b,&R,&G,&B);  
    // printf("PICTURE DETECTED. USING A B = [%f, %f]\n", a, b);
    // printf("DONE\n");
  }

  // double numSources = 0.0;
  colourRGB reflectCol, refractCol, tmp_col;
  tmp_col.R = 0; tmp_col.G = 0; tmp_col.B = 0;
  reflectCol.R = 0; reflectCol.G = 0; reflectCol.B = 0;
  refractCol.R = 0; refractCol.G = 0; refractCol.B = 0;
  // Create a ray from light source
  pointLS *lightSource = light_list;
  while (lightSource != NULL){
    // numSources += 1.0;
    // This will hold the colour as we process all the components of
    // the Phong illumination model
    // tmp_col.R=;
    // tmp_col.G=obj->col.G;
    // tmp_col.B=obj->col.B;
    // Set up ray FindFirstHit.

    ray3D lightDirection;// = newRay(p, &lightSource->p0);
    lightDirection.p0 = *p;
    lightDirection.d = lightSource->p0;
    subVectors(p, &lightDirection.d);
    double tempLambda;
    object3D *tempObject;
    point3D tempP;
    point3D tempN;
    // Obtain intersection information.
    findFirstHit(&lightDirection, &tempLambda, obj, &tempObject, &tempP, &tempN, &a, &b);
    if (obj->frontAndBack && dot(&nAdj, &lightDirection.d) < 0){
      nAdj.px = fabs(nAdj.px) * fabs(lightDirection.d.px) / lightDirection.d.px;
      nAdj.py = fabs(nAdj.py) * fabs(lightDirection.d.py) / lightDirection.d.py;
      nAdj.pz = fabs(nAdj.pz) * fabs(lightDirection.d.pz) / lightDirection.d.pz;
    }
    // if (a > 0.0001 && b > 0.0001){
    //   // printf("a, b returned as %f, %f\n", a, b);
    // }
    // Apply Ambient Light.
    // Ambient = r_a * I_a * 255
    //         = Albedo_ra * objectColor
    tmp_col.R += (R * obj->alb.ra * lightSource->col.R);// / NUM_LIGHTS;// lightSource->col.R;// / NUM_LIGHTS;
    tmp_col.G += (G * obj->alb.ra * lightSource->col.G);// / NUM_LIGHTS;// lightSource->col.G;// / NUM_LIGHTS;
    tmp_col.B += (B * obj->alb.ra * lightSource->col.B);// / NUM_LIGHTS;// lightSource->col.B;// / NUM_LIGHTS;
    // printf("AMBIENT LIGHT: [%f, %f, %f]\n", tmp_col.R,tmp_col.G,tmp_col.B);
    // Light source is shining on object if we get here.
    // Set up Phong components:
    point3D s;// = newPoint(lightSource->p0.px - p->px, lightSource->p0.py - p->py, lightSource->p0.pz - p->pz);
    s.px = lightSource->p0.px - p->px;
    s.py = lightSource->p0.py - p->py;
    s.pz = lightSource->p0.pz - p->pz;
    s.pw = 1.0;
    point3D c;// = newPoint(-ray->d.px, -ray->d.py, -ray->d.pz);
    c.px = -ray->d.px;
    c.py = -ray->d.py;
    c.pz = -ray->d.pz;
    c.pw = 1.0;
    subVectors(p, &s);
    // normalize(s);
    point3D m; //= newPoint(2 * dot(s,n) * n->px,
               //           2 * dot(s,n) * n->py,
               //           2 * dot(s,n) * n->pz);
    m.px = 2 * dot(&s,&nAdj) * nAdj.px;
    m.py = 2 * dot(&s,&nAdj) * nAdj.py;
    m.pz = 2 * dot(&s,&nAdj) * nAdj.pz;
    subVectors(&s, &m);
    normalize(&m);
    normalize(&nAdj);
    normalize(&s);
    double dotProduct = dot(&nAdj,&s);

    if (tempLambda < 0.000000001 || tempLambda > 1.0000000){
      // Apply Diffuse
      // Diffuse = r_d * I_d * max(0, n dot s)
      //         = Albedo_rd * lightSource * max(0, n dot s)
      tmp_col.R += (obj->alb.rd * lightSource->col.R * max(0, dotProduct)) * R;// +
      tmp_col.G += (obj->alb.rd * lightSource->col.G * max(0, dotProduct)) * G;// +
      tmp_col.B += (obj->alb.rd * lightSource->col.B * max(0, dotProduct)) * B;// +
      // printf("DIFFUSE LIGHT: [%f, %f, %f]\n", tmp_col.R,tmp_col.G,tmp_col.B);
      // Apply Specular
      // Specular = r_s * I_s * max(0, c dot m) ^ shinyness
      //          = Albedo_rs * lightSource * max(0, c dot m) ^ shinyness
      tmp_col.R += (obj->alb.rs * lightSource->col.R * pow(max(0, dot(&c, &m)), obj->shinyness));
      tmp_col.G += (obj->alb.rs * lightSource->col.G * pow(max(0, dot(&c, &m)), obj->shinyness));
      tmp_col.B += (obj->alb.rs * lightSource->col.B * pow(max(0, dot(&c, &m)), obj->shinyness));
    }
    colourRGB reflectedColor;
    reflectedColor.R = 0; reflectedColor.G = 0; reflectedColor.B = 0;
    if (depth < MAX_DEPTH){
      if (obj->alb.rs > 0){
        // Apply Reflections
        // This involves shooting a ray out from the intersection
        // position and obtaining a colour from that ray.
        // First I will obtain the mirror vector:
        point3D refPoint;// = newPoint(-2.0 * dot(&ray->d, n) * n->px,
                          //           -2.0 * dot(&ray->d, n) * n->py,
                          //           -2.0 * dot(&ray->d, n) * n->pz);
        refPoint.px = -2.0 * dot(&ray->d, &nAdj) * nAdj.px;
        refPoint.py = -2.0 * dot(&ray->d, &nAdj) * nAdj.py;
        refPoint.pz = -2.0 * dot(&ray->d, &nAdj) * nAdj.pz;
        refPoint.pw = 1.0;
        addVectors(&ray->d, &refPoint);
        normalize(&refPoint);
        ray3D refRay;// = newRay(p, refPoint);
        refRay.p0 = *p;
        refRay.d = refPoint;
        // Then I will obtain the colour using mirror vector.
        
        reflectHit ++;
        if (reflectCol.R == 0) {
          rayTrace(&refRay, depth + 1, &reflectedColor, tempObject);  // depth+1, not ++depth: don't mutate depth across lights
          reflectCol.R = obj->alb.rg * reflectedColor.R * R * NUM_LIGHTS;// * lightSource->col.R;
          reflectCol.G = obj->alb.rg * reflectedColor.G * G * NUM_LIGHTS;// * lightSource->col.G;
          reflectCol.B = obj->alb.rg * reflectedColor.B * B * NUM_LIGHTS;// * lightSource->col.B;
        }
        // Clean up created pointers.
        // free(refRay);
        // free(refPoint);
        // DRT Colours
      }
      if (obj->alpha < 1){
        // Apply Refractions
        // This involves shooting a ray out from the intersection
        // position, finding the angle of refraction and then
        // shooting a ray out in that general direction.
        struct point3D vVec, neg_vVec, refractDir, neg_n, neg_d;
        vVec.px = p->px - ray->p0.px; vVec.py = p->py - ray->p0.py; vVec.pz = p->pz - ray->p0.pz;
        neg_vVec.px = - vVec.px; neg_vVec.py = - vVec.py; neg_vVec.pz = - vVec.pz;
        neg_n.px = -nAdj.px; neg_n.py = -nAdj.py; neg_n.pz = -nAdj.pz;
        neg_d.px = -ray->d.px; neg_d.py = -ray->d.py; neg_d.pz = -ray->d.pz;
        refractDir.pw = 1.0;
        // Only refract when the discriminant under the square root is
        // non-negative; otherwise we have total internal reflection and
        // refractDir would be left uninitialized.
        int totalInternalReflection = 1;
        if (dot(n, &vVec) < 0){
          // Entering medium
          double nr = 1 / obj->r_index;
          double rootContent = 1 - pow(nr, 2) * (1 - pow(dot(&neg_vVec, n), 2));
          if (rootContent >= 0.0) {
            rootContent = sqrt(rootContent);
            refractDir.px = (nr * (dot(&neg_vVec, &nAdj) - rootContent) * nAdj.px - (nr * neg_vVec.px));
            refractDir.py = (nr * (dot(&neg_vVec, &nAdj) - rootContent) * nAdj.py - (nr * neg_vVec.py));
            refractDir.pz = (nr * (dot(&neg_vVec, &nAdj) - rootContent) * nAdj.pz - (nr * neg_vVec.pz));
            totalInternalReflection = 0;
          }
        } else {
          // Exiting medium
          double nr = obj->r_index;
          double rootContent = 1 - pow(nr, 2) * (1 - pow(dot(&neg_n, &neg_d), 2));
          if (rootContent >= 0.0) {
            rootContent = sqrt(rootContent);
            refractDir.px = (nr * (dot(&neg_d, &neg_n) - rootContent) * neg_n.px - (nr * neg_d.px));
            refractDir.py = (nr * (dot(&neg_d, &neg_n) - rootContent) * neg_n.py - (nr * neg_d.py));
            refractDir.pz = (nr * (dot(&neg_d, &neg_n) - rootContent) * neg_n.pz - (nr * neg_d.pz));
            totalInternalReflection = 0;
          }
        }
        if (!totalInternalReflection) {
          ray3D refRay;
          refRay.p0.px = p->px + 0.00009;
          refRay.p0.py = p->py + 0.00009;
          refRay.p0.pz = p->pz + 0.00009;
          refRay.p0.pw = 1.0;
          refRay.d = refractDir;
          colourRGB refractedColor;
          refractedColor.R = 0; refractedColor.G = 0; refractedColor.B = 0;
          rayTrace(&refRay, depth + 1, &refractedColor, obj);  // depth+1, not ++depth
          refractHit ++;
          refractCol.R += (1 - obj->alpha) * refractedColor.R * R;
          refractCol.G += (1 - obj->alpha) * refractedColor.G * G;
          refractCol.B += (1 - obj->alpha) * refractedColor.B * B;
        }
      }
    }
    // tmp_col.R += reflectCol.R + refractCol.R;// + reflectCol.R + refractCol.R;
    // tmp_col.G += reflectCol.G + refractCol.G;// + reflectCol.G + refractCol.G;
    // tmp_col.B += reflectCol.B + refractCol.B;// + reflectCol.B + refractCol.B;
    // Move pointer to next light source.
    numLights ++;
    lightSource = lightSource->next;
    // Clean up created pointers.
    // free(m);
    // free(s);
    // free(c);
    // free(lightDirection);
  }
 
  // Finally update the color with what we've calculated.

  // col->R = min(drtR/numSources, 1);
  // col->G = min(drtG/numSources, 1);
  // col->B = min(drtB/numSources, 1);
  col->R = min(tmp_col.R + reflectCol.R/fmax(NUM_LIGHTS, 1) + refractCol.R/fmax(refractHit, 1), 1);
  col->G = min(tmp_col.G + reflectCol.G/fmax(NUM_LIGHTS, 1) + refractCol.G/fmax(refractHit, 1), 1);
  col->B = min(tmp_col.B + reflectCol.B/fmax(NUM_LIGHTS, 1) + refractCol.B/fmax(refractHit, 1), 1);
  //   col->R = (n->px + 1) / 2;
  // col->G = (n->py + 1) / 2;
  // col->B = (n->pz + 1) / 2;
  // printf("RTSHADE COMPLETE %f, [%f, %f, %f]\n", numSources, col->R, col->G, col->B);
 
  return;
}
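The refraction branch above is Snell's law in vector form, with separate cases for entering and exiting the medium and a discriminant check for total internal reflection. A minimal sketch of the same direction computation in its standard form; Vec3 and the helpers are stand-ins for the project's point3D utilities:

#include <cmath>

struct Vec3 { double x, y, z; };

static double dot(const Vec3& a, const Vec3& b) { return a.x*b.x + a.y*b.y + a.z*b.z; }
static Vec3 scale(double s, const Vec3& v) { return {s*v.x, s*v.y, s*v.z}; }
static Vec3 add(const Vec3& a, const Vec3& b) { return {a.x+b.x, a.y+b.y, a.z+b.z}; }

// Refract unit direction d at a surface with unit normal n (facing the
// incoming ray) and index ratio eta = n_incident / n_transmitted.
// Returns false on total internal reflection, in which case out is untouched.
bool refractDirection(const Vec3& d, const Vec3& n, double eta, Vec3& out) {
    double cosI = -dot(d, n);
    double sin2T = eta * eta * (1.0 - cosI * cosI);
    if (sin2T > 1.0) return false;                 // total internal reflection
    double cosT = std::sqrt(1.0 - sin2T);
    out = add(scale(eta, d), scale(eta * cosI - cosT, n));
    return true;
}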
Example #9
void ObjectTracker::matchObjects(CvPoint *newCenters, int size) {
    if (currObjs_.empty()) {
        for (int i=0; i<size; i++) {
            createNewObject(newCenters[i]);
        }
        return;
    }

    if (size == 0) {
        removeObjects();
    }

    int parameter = imgSize_.width + imgSize_.height;
    int rows = currObjs_.size();
    int cols = size;
    objsM_ = new double*[rows];

    for (int i=0; i<rows; i++) {
        objsM_[i] = new double[cols];
    }
    CvPoint *predictedPnts = new CvPoint[rows];
    for (int i=0; i<rows; i++) {
        predictedPnts[i] = currObjs_[i].predictNextPosition();
    }

    for (int i=0; i<rows; i++) {
        for (int j=0; j<cols; j++) {
            CvPoint distance = subVectors(predictedPnts[i], newCenters[j]);
            objsM_[i][j] = getMagnitude(distance) / parameter;
        }
    }

    vector<int> list;
    // Check columns
    for (int j=0; j<cols; j++) {
        list.clear();
        for (int i=0; i<rows; i++) {
            if (objsM_[i][j] < matchThreshold_) {
                list.push_back(i);
            }
        }
        switch (list.size()) {
        case 0: // New object
            createNewObject(newCenters[j]);
            break;
        case 1:
            break;
        default:
            break;
        }
    }

    // Check rows
    vector<int> removedObjectsv;
    removedObjectsv.clear();
    for (int i=0; i<rows; i++) {
        list.clear();
        for (int j=0; j<cols; j++) {
            if (objsM_[i][j] < matchThreshold_) {
                list.push_back(j);
            }
        }

        switch(list.size()) {
        case 0: // No more match
            removedObjectsv.push_back(i);
            break;
        case 1:
            currObjs_[i].correctPosition(newCenters[list[0]]);
            break;
        default:
            break;
        }
    }

    removeObjects(&removedObjectsv);


    //printMatrix(objsM_, rows, cols);
    for (int i=0; i<rows; i++) {
        delete[] objsM_[i];
    }
    delete[] objsM_;
}
Example #10
QJSValue THREEVector3::sub(QJSValue value1, QJSValue value2) {
    qDebug() << "THREE.Vector3: .sub() now only accepts one argument. Use .subVectors( a, b ) instead.";
    return subVectors(value1, value2);
}
Example #11
void TorusKnotGeometry::initialize( float radius,
                 float tube,
                 size_t radialSegments,
                 size_t tubularSegments,
                 float p,
                 float q,
                 float heightScale ) {

	std::vector<std::vector<int>> grid;
	grid.resize( radialSegments );

	auto tang = Vector3();
	auto n = Vector3();
	auto bitan = Vector3();

	for ( size_t i = 0; i < radialSegments; ++ i ) {


		auto u = (float)i / (float)radialSegments * 2.f * p * Math::PI();
		Vector3 p1 = getPos( u, q, p, radius, heightScale );
		Vector3 p2 = getPos( u + 0.01f, q, p, radius, heightScale );

		tang.subVectors( p2, p1 );
		n.addVectors( p2, p1 );

		bitan.crossVectors( tang, n );
		n.crossVectors( bitan, tang );
		bitan.normalize();
		n.normalize();

		for ( size_t j = 0; j < tubularSegments; ++ j ) {

			auto v = (float)j / (float)tubularSegments * 2.f * Math::PI();
			auto cx = - tube * Math::cos( v ); // TODO: Hack: Negating it so it faces outside.
			auto cy = tube * Math::sin( v );

			auto pos = Vector3();
			pos.x = p1.x + cx * n.x + cy * bitan.x;
			pos.y = p1.y + cx * n.y + cy * bitan.y;
			pos.z = p1.z + cx * n.z + cy * bitan.z;

			this->vertices.push_back( pos );
			grid[ i ].push_back( this->vertices.size() - 1 );

		}

	}

	for ( size_t i = 0; i < radialSegments; ++ i ) {

		for ( size_t j = 0; j < tubularSegments; ++ j ) {

			auto ip = ( i + 1 ) % radialSegments;
			auto jp = ( j + 1 ) % tubularSegments;

			auto a = grid[ i ][ j ];
			auto b = grid[ ip ][ j ];
			auto c = grid[ ip ][ jp ];
			auto d = grid[ i ][ jp ];

			auto uva = Vector2( (float)i / (float)radialSegments, (float)j / (float)tubularSegments );
			auto uvb = Vector2( (float)( i + 1 ) / (float)radialSegments, (float)j / (float)tubularSegments );
			auto uvc = Vector2( (float)( i + 1 ) / (float)radialSegments, (float)( j + 1 ) / (float)tubularSegments );
			auto uvd = Vector2( (float)i / (float)radialSegments, (float)( j + 1 ) / (float)tubularSegments );

			this->faces.push_back( Face3( a, b, d ) );
			this->faceVertexUvs[ 0 ].push_back( toArray( uva, uvb, uvd ) );

			this->faces.push_back( Face3( b, c, d ) );
			this->faceVertexUvs[ 0 ].push_back( toArray( uvb.clone(), uvc, uvd.clone() ) );

		}
	}

	this->computeCentroids();
	this->computeFaceNormals();
	this->computeVertexNormals();

}
Example #12
	Vector3& lerpVectors(const Vector3& v1, const Vector3& v2, T alpha){
		subVectors(v2, v1).multiply(alpha).add(v1);

		return *this;
	}
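This computes v1 + alpha·(v2 - v1) in place by chaining subVectors, multiply, and add, so alpha = 0 yields v1 and alpha = 1 yields v2. The same operation as a free function, sketched with a plain struct standing in for Vector3:

struct Vec3 { double x, y, z; };

// Linear interpolation: returns v1 when alpha == 0 and v2 when alpha == 1.
Vec3 lerpVectors(const Vec3& v1, const Vec3& v2, double alpha) {
    return {v1.x + (v2.x - v1.x) * alpha,
            v1.y + (v2.y - v1.y) * alpha,
            v1.z + (v2.z - v1.z) * alpha};
}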