Code example #1
File: Mat4fTest.cpp  Project: knuke/GMath
  TEST_F(Mat4fTest, MethodCreateLookAt)
  {
    for (int i = 0; i < RANDOM_ITERATION_COUNT; i++)
    {
      float* rnd = createRandomVec3f();
      float* rnd2 = createRandomVec3f();
      float* rnd3 = createRandomVec3f();

      Vec3f v(rnd);
      Vec3f v2(rnd2);
      Vec3f v3(rnd3);

      vec3 glm_v = make_vec3(rnd);
      vec3 glm_v2 = make_vec3(rnd2);
      vec3 glm_v3 = make_vec3(rnd3);

      Mat4f m = Mat4f::createLookAt(v, v2, v3);

      mat4 glm_m(1.0f);
      glm_m = glm::lookAt(glm_v, glm_v2, glm_v3);

      cmpMat4f(value_ptr(glm_m), m);

      delete[] rnd;
      delete[] rnd2;
      delete[] rnd3;
    }

    Mat4f m =
        Mat4f::createLookAt(Vec3f(0, 0, -10), Vec3f(0, 0, 0), Vec3f(0, 1, 0));
    mat4 glm_m = glm::lookAt(vec3(0, 0, -10), vec3(0, 0, 0), vec3(0, 1, 0));
    cmpMat4f(value_ptr(glm_m), m);
  }
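Note: the tests in this file rely on helpers such as createRandomVec3f() and createRandomF() that are not shown here. A minimal sketch of what createRandomVec3f() might look like, assuming (as the delete[] calls suggest) that it returns a heap-allocated float[3]:

  // Hypothetical test helper, not taken from the project source:
  // returns a heap-allocated float[3] with random components,
  // which the caller releases with delete[].
  static float* createRandomVec3f()
  {
    float* v = new float[3];
    for (int i = 0; i < 3; i++)
      v[i] = static_cast<float>(rand()) / static_cast<float>(RAND_MAX) * 20.0f - 10.0f;
    return v;
  }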
Code example #2
void RX::update_unit_vectors() {

    // Build the local orthonormal basis from the spherical angles (zenith, azimuth).
    x_v = normalize(make_vec3(cos(zenith) * cos(azimuth), cos(zenith) * sin(azimuth), -sin(zenith)));

    y_v = normalize(make_vec3(             -sin(azimuth),               cos(azimuth),            0));

    z_v = normalize(make_vec3(sin(zenith) * cos(azimuth),  sin(zenith) * sin(azimuth),  cos(zenith)));

    // Apply the roll as a rotation about the local z axis.
    mat4 m = make_mat4_rotate_axis(z_v, roll);

    x_v = m * x_v;
    y_v = m * y_v;
    z_v = m * z_v;

}
Code example #3
File: kernel_render_ao.c  Project: Benjamin-L/qrender
/** Add a sample to a tile using the AO algorithm
 * @param state global renderer state
 * @param pixels pixel buffer to add the sample to
 * @param isect_buffer intersection buffer from samples generated by kernel_ao_gen_samples
 * @param mask_buffer the mask bitmap
 * @param sample_buffer sample buffer holding the per-sample weights
 * @param n_samples number of samples already in the buffer
 * @param i index of the pixel/sample to update */
void kernel_ao_add_sample(state_t state, vec3* pixels, hit_t* isect_buffer, int* mask_buffer, sample_t* sample_buffer, int n_samples, int i) {
	//compute the sample
	float l;
	if(mask_buffer[i] == 1) {
		l = 0.f;
	}
	else if(isect_buffer[i].t == -1) {
		l = 1.f;
	}
	else {
		l = minf(1.f,isect_buffer[i].t/state.ao_params.length);
	}
	vec3 p = make_vec3(l,l,l);

	if(isnan(p.x) || isnan(p.y) || isnan(p.z)) {
		p = make_vec3(0.f,1.f,0.f);
	}
	pixels[i] = div_vec3_scalar(
		add_vec3(mul_vec3_scalar(pixels[i], n_samples+(1-sample_buffer[i].weight)),
		         mul_vec3_scalar(p, sample_buffer[i].weight)),
		(n_samples+1));
}
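The last statement folds the new occlusion value into a running per-pixel average. Written out with w = sample_buffer[i].weight, the update is

  pixel_new = (pixel_old * (n_samples + 1 - w) + w * p) / (n_samples + 1)

so a full-weight sample (w = 1) gives the ordinary incremental mean, while a smaller weight leaves the remaining fraction of the new slot on the old value.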
Code example #4
File: garbage_math.c  Project: Frozenhorns/project
double dist_line_pt(t_line *del, t_vec3 *b)
{
  t_vec3 norm;
  t_vec3 aout;
  t_vec3 out;

  make_vec3(del->direction.x, del->direction.y, del->direction.z, &norm);
  normalise_vec3(&norm);
  return (lenth_vec3(vectorial_product3(substract_vec3(b, &(del->point), &aout), &norm, &out)));
}
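For reference, this is the standard point-to-line distance: with A = del->point, u the normalized direction and P = b, the code evaluates

  d(P, line) = | (P - A) x u |

i.e. the length of the cross product of the offset vector with the unit direction, which is exactly the substract_vec3 -> vectorial_product3 -> lenth_vec3 chain above.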
Code example #5
File: Mat4fTest.cpp  Project: knuke/GMath
  TEST_F(Mat4fTest, MethodCreateInverse)
  {
    for (int i = 0; i < RANDOM_ITERATION_COUNT; i++)
    {
      int rnd_rot_iter = rand() % 10;
      int rnd_trans_iter = rand() % 10;

      Mat4f m(1.0f);
      mat4 glm_m(1.0f);

      for (int j = 0; j < rnd_rot_iter; j++)
      {
        float* rnd_rot = createRandomVec3f();
        float rnd_rad = createRandomF();

        Vec3f rot(rnd_rot);
        vec3 glm_rot = make_vec3(rnd_rot);

        m = m * Mat4f::createRotate(rnd_rad, rot);
        glm_m = glm::rotate(glm_m, rnd_rad, glm_rot);
        delete[] rnd_rot;
      }

      for (int j = 0; j < rnd_trans_iter; j++)
      {
        float* rnd_trans = createRandomVec3f();

        Vec3f trans(rnd_trans);
        vec3 glm_trans = make_vec3(rnd_trans);

        m = m * Mat4f::createTranslate(trans);
        glm_m = glm::translate(glm_m, glm_trans);
        delete[] rnd_trans;
      }

      m = Mat4f::createInverse(m);
      glm_m = glm::inverse(glm_m);

      cmpMat4f(value_ptr(glm_m), m);
    }
  }
Code example #6
File: square_wave.cpp  Project: dizuo/xrender
void init(void)
{
    glShadeModel(GL_SMOOTH);

    glEnable(GL_DEPTH_TEST);
    glCullFace(GL_BACK);

    // load_seed_items("seed_items.txt");
    g_ctrl.product_items(g_seed_items, g_seed_num);

    // glEnable(GL_DEPTH_TEST);
    glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST);

    // Draw everything in wireframe.
    glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);

    make_vec3(camera, 0, 0, 200);
    make_vec3(object, 0, 0, 0);
    make_vec3(rot_vec, 0, 0, 0);

}
Code example #7
File: parse_world.c  Project: Benjamin-L/qrender
void load_mesh(char* fname, state_t* state, transform_data_t t, int bsdf_index, char* matname) {

	printf("Loading mesh %s\n",fname);

	//parse file
	const aiScene* scene = aiImportFile(fname, aiProcess_Triangulate | aiProcess_JoinIdenticalVertices);

	qr_assert(scene, "parser", "Could not open mesh file: %s (%s)",fname, aiGetErrorString());
	
	for(int i=0; i<scene->mNumMeshes; i++) {
		bool contin = false;

		if(matname == NULL) {
			contin = true;
		}
		else {
			aiString name;
			aiGetMaterialString(scene->mMaterials[scene->mMeshes[i]->mMaterialIndex], AI_MATKEY_NAME, &name);
			contin = strcmp(matname, &name.data[0]) == 0;
		}

		if(contin) {
			int pind = state->n_primitives;
			state->n_primitives += scene->mMeshes[i]->mNumFaces;
			state->primitives = (primitive_t*)realloc(state->primitives, sizeof(primitive_t)*(state->n_primitives));
			int vind = state->n_verts;
			int vstart = state->n_verts;
			state->n_verts += scene->mMeshes[i]->mNumVertices;
			state->verts = (vec3*)realloc(state->verts, sizeof(vec3)*(state->n_verts));

			for(int j=0; j<scene->mMeshes[i]->mNumVertices; j++) {
				state->verts[vind++] = transform_point(make_vec3(scene->mMeshes[i]->mVertices[j].x, scene->mMeshes[i]->mVertices[j].y, scene->mMeshes[i]->mVertices[j].z), state->transforms[t.index], false);
			}

			for(int j=0; j<scene->mMeshes[i]->mNumFaces; j++) {
				qr_assert(scene->mMeshes[i]->mFaces[j].mNumIndices==3, "parser", "Only triangles are supported (%s)",fname);
				state->primitives[pind].type = PRIMITIVE_TRIANGLE;
				state->primitives[pind].t = 0;
				state->primitives[pind].bsdf = bsdf_index;
				state->primitives[pind++].data = make_primitive_triangle(vstart+scene->mMeshes[i]->mFaces[j].mIndices[0], vstart+scene->mMeshes[i]->mFaces[j].mIndices[1], vstart+scene->mMeshes[i]->mFaces[j].mIndices[2]);
			}
		}
	}

	aiReleaseImport(scene);
}
Code example #8
File: Mat4fTest.cpp  Project: knuke/GMath
  TEST_F(Mat4fTest, MethodCreateScaleFFF)
  {
    for (int i = 0; i < RANDOM_ITERATION_COUNT; i++)
    {
      float* rnd = createRandomVec3f();

      Vec3f v(rnd);
      vec3 glm_v = make_vec3(rnd);

      Mat4f m(1.0f);
      m = Mat4f::createScale(v[0], v[1], v[2]);

      mat4 glm_m(1.0f);
      glm_m = glm::scale(glm_m, glm_v);

      cmpMat4f(value_ptr(glm_m), m);

      delete[] rnd;
    }
  }
Code example #9
File: Mat4fTest.cpp  Project: knuke/GMath
  TEST_F(Mat4fTest, MethodTranslateVec3f)
  {
    for (int i = 0; i < RANDOM_ITERATION_COUNT; i++)
    {
      float* rnd = createRandomVec3f();

      Vec3f v(rnd);
      vec3 glm_v = make_vec3(rnd);

      Mat4f m(1.0f);
      m.translate(v);

      mat4 glm_m(1.0f);
      glm_m = glm::translate(glm_m, glm_v);

      cmpMat4f(value_ptr(glm_m), m);

      delete[] rnd;
    }
  }
Code example #10
File: Mat4fTest.cpp  Project: knuke/GMath
  TEST_F(Mat4fTest, MethodRotateFFFF)
  {
    for (int i = 0; i < RANDOM_ITERATION_COUNT; i++)
    {
      float* rnd = createRandomVec3f();
      float rnd2 = createRandomF();

      Vec3f v(rnd);
      vec3 glm_v = make_vec3(rnd);

      Mat4f m(1.0f);
      m.rotate(rnd2, v[0], v[1], v[2]);

      mat4 glm_m(1.0f);
      glm_m = glm::rotate(glm_m, rnd2, glm_v);

      cmpMat4f(value_ptr(glm_m), m);

      delete[] rnd;
    }
  }
Code example #11
void Visualize(int *argc, char** argv) {
	
	glutInit(argc, argv);											// --- Initializes the GLUT library
    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH);		// --- Sets the initial display mode
    //glutInitWindowSize(1024, 768);									// --- Sets the initial window size
    glutInitWindowSize(2048, 1024);									// --- Sets the initial window size
    glutInitWindowPosition (100, 100);								// --- Sets the initial window position
    glutCreateWindow(argv[0]);										// --- Creates a top level window

	// --- Black background
	//glClearColor(0.0f, 0.0f, 0.0f, 0.0f);							// --- Sets the clear color for the color buffer
	// --- White background
	glClearColor(1.0f, 1.0f, 1.0f, 1.0f);							// --- Sets the clear color for the color buffer
    glClearDepth(1.0f);												// --- Specifies the depth value used when the depth buffer is cleared
    glDepthFunc(GL_LESS);											// --- Specifies the value used for depth buffer comparisons
    glEnable(GL_DEPTH_TEST);										// --- Enables depth testing
    glShadeModel(GL_SMOOTH);										// --- Selects smooth (Gouraud) shading

	srand((unsigned int)time(NULL));

	cam.pos = make_vec3(0, 3, 15);
    cam.elevation = 0;
    cam.azimut = 0;

	input_loop();
	//pthread_t thread1;

 //   int  iret1 = pthread_create(&thread1, NULL, input_loop, NULL);
 //   if(iret1) {
 //       fprintf(stderr,"Error - pthread_create() return code: %d\n",iret1);
 //       exit(EXIT_FAILURE);
 //   }

    mesh_colors.push_back(make_vec3(0.5,0,0));
    mesh_colors.push_back(make_vec3(0,0.5,0));
    mesh_colors.push_back(make_vec3(0,0,0.5));
    mesh_colors.push_back(make_vec3(1,1,0));
    mesh_colors.push_back(make_vec3(0,1,1));
    mesh_colors.push_back(make_vec3(1,0,1));
    mesh_colors.push_back(make_vec3(1,0.5,0.5));
    mesh_colors.push_back(make_vec3(0.2,0.5,1));
    mesh_colors.push_back(make_vec3(0.2,0.2,0.2));

    ray_colors.push_back(make_vec3(0.2f, 0.8f, 1.0f));
    ray_colors.push_back(make_vec3(0.2f, 1.0f, 1.0f));
    ray_colors.push_back(make_vec3(0.2f, 1.0f, 1.0f));
    ray_colors.push_back(make_vec3(1.0f, 0.2f, 0.2f));
    ray_colors.push_back(make_vec3(0.0f, 0.2f, 1.0f));

    glutDisplayFunc(display);
    glutReshapeFunc(reshape);
    glutKeyboardFunc(keyboardDown);
    glutKeyboardUpFunc(keyboardUp);
    glutTimerFunc(50, loop, 0);
    glutMainLoop();
}
Code example #12
// Why have separate functions for add, subtract, or multiply when
// this one does all 3?
//
//  a + b = add_scaled(a, b, 1)
//  a - b = add_scaled(a, b, -1)
//  0.5 * a = add_scaled(zero, a, 0.5)
//
vec3 add_scaled(vec3 a, vec3 b, real c) {
  // Order scrambled for obfuscation's sake
  return make_vec3(a.c+b.c*c, a.a+c*b.a, b.t*c+a.t);
}
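The snippets from this entry use a three-component vector type whose fields are named c, a and t (treated roughly as x, y and z by the annotations below). A minimal sketch of the type and of make_vec3 consistent with that usage; the actual definitions in the entry may differ:

typedef double real;

// field names as used by the snippets in this collection
typedef struct { real c, a, t; } vec3;

vec3 make_vec3(real c, real a, real t) {
  vec3 v;
  v.c = c;
  v.a = a;
  v.t = t;
  return v;
}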
Code example #13
// For the point input, this will compute and return the distance to
// the closest scene primitive. It will also update
// objpoint_or_material to reflect the material of the closest object
// (blue for text, red or white for floor).
real dist_to_scene(vec3 point_or_vecop) {

  // This loop initializes global closest_dist_squared to a big number
  // and point to the function's argument.  We will iterate over pairs
  // of vectors in the vecop_buffer using buffer_offset up to the
  // given buffer_length.
  for (closest_dist_squared=forty,
         point=point_or_vecop, buffer_offset=minus_one; 

       // The first item in the vecop_buffer pair becomes objpoint
       // (arc center or line endpoint). The second item denotes line
       // displacement or arc angle. Note we also offset the
       // x-coordinate of the objpoint to approximately center the
       // text. The last thing we do here is bail out of the loop if
       // we are past end-of-buffer.
       objpoint_or_material = vecop_buffer[++buffer_offset],
         point_or_vecop =
           vecop_buffer[objpoint_or_material.c+=8.8-text_width*.45,
                        ++buffer_offset],
         buffer_offset<=buffer_length;

       // After each loop iteration, we should update the distance
       // query but of course this doesn't get evaluated until after
       // the code below.
       update_distance_query()) 

    // Currently objpoint is arc center or line endpoint.  We must
    // update it to the closest point on the primitive (arc or line)
    // to the current point. First we check whether we are an arc (op.t != 0)
    // or a line (op.t == 0):
    objpoint_or_material = point_or_vecop.t ?

      //////////////////////////////////////////////////
      // We are an arc

      // Quantize angles to 90 degree increments
      dot_out_or_tmp = M_PI*half,

      // Get the actual angle and divide by 90 degrees.  We will now
      // need to clamp the angle to deal with the arc endpoints
      // nicely.
      angle_or_width=atan2(point.a-objpoint_or_material.a,
                           point.c-objpoint_or_material.c)/dot_out_or_tmp,

      // op.c encodes lower bound of angle
      lo_or_found=point_or_vecop.c-2,

      // op.a encodes upper bound of angle
      range_or_curglyph=point_or_vecop.a+1,

      // Get the upper end of the range
      hi_or_started=lo_or_found+range_or_curglyph,

      // Now clamp! Note we scale overall result by 90 degrees
      angle_or_width = dot_out_or_tmp*(
         // did we wrap past the upper end of the range?
         angle_or_width>hi_or_started+half*range_or_curglyph ?
         // if so go low
         lo_or_found :
         // otherwise greater than upper end?
         angle_or_width > hi_or_started ?
         // then go hi
         hi_or_started :
         // did we wrap past the lower end of the range?
         angle_or_width<lo_or_found-half*range_or_curglyph ?
         // go hi
         hi_or_started :
         // otherwise less than lower end?
         angle_or_width<lo_or_found ?
         // go lo
         lo_or_found :
         // no clamp needed
         angle_or_width),

      // Now we can finally offset the objpoint (arc center) by angle
      // (note radius = 1), which is convenient.
      add_scaled(objpoint_or_material,
                 make_vec3(cos(angle_or_width), sin(angle_or_width), 0), 1)
      
      :

      //////////////////////////////////////////////////
      // Nope, not arc, a line segment.

      // In this case op is just the displacement of endpoint from
      // startpoint so we can compute the nearest point along the line
      // using the standard formula.
      add_scaled(objpoint_or_material, point_or_vecop,
                 clamp(dot( add_scaled(point, objpoint_or_material, minus_one),
                            point_or_vecop) /
                       dot(point_or_vecop, point_or_vecop) ) );

  //////////////////////////////////////////////////
  // Done with for loop.

  // Now we need to check the distance to the floor, which exists
  // everywhere at a coordinate of y = -0.9. Here we update the
  // objpoint by copying the point and setting y.
  objpoint_or_material=point;
  objpoint_or_material.a = -.9;

  // Here we are creating a nice checkerboard texture by XOR'ing the x
  // and z coordinates mod 8.
  lo_or_found = point.c/8+8;
  lo_or_found ^= range_or_curglyph=point.t/8+8;

  // Finally, we are going to save the material. First we update the
  // distance query based upon the floor objpoint:
  objpoint_or_material = update_distance_query() ?

    // if the query updated, we are the floor, so red or white
    lo_or_found&1 ? make_vec3(twothirds,0,0) : ones :

    // otherwise the query didn't update so we are blue.
    make_vec3(twothirds,twothirds,1);

  // Finally we return the distance to closest point, offset by 0.45
  // (to give the primitives some thickness)
  return sqrt(closest_dist_squared)-.45;

}
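The line-segment branch above is the usual projection of a point onto a segment: for a start point A, a displacement u and a query point P,

  closest = A + clamp( ((P - A) . u) / (u . u) ) * u

where clamp presumably restricts the parameter to [0, 1] so the result stays between the two endpoints.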
Code example #14
// Finally our main function
int main(int argc_or_image_col, char** argv) {

  //////////////////////////////////////////////////
  // Step 1: fill up the vecop_buffer with the correct primitives for
  // the text to display by parsing the font table.
  //
  // The outer loop here is over characters to print, which come
  // either from argv[1] (if argc > 1), or from progdata table above,
  // in which case they have to be XOR-ed with 5 to get the actual
  // text.
  
  for (// Was a string provided on the command line?
       textptr = argc_or_image_col>1 ?
         // If so, initialize textptr from argv[1] and clear XOR mask
         1[xormask_or_quality=0, argv] :
         // else, initialize textptr from progdata.
         progdata;

       // Go until terminating 0 or text too wide
       *textptr && text_width<24;

       // Increment textptr each iteration.
       ++textptr)

    // Inner loop is over the font table encoded in progdata (see
    // explanation at top). For each text character, we need to try to
    // find the corresponding glyph in the font and push all of its
    // strokes into the vecop_buffer, two vectors at a time.
    
    for (// Initialize found to false
         // Initialize range_or_curglyph to zero ???
         lo_or_found=range_or_curglyph=0,
           
           // Start 10 characters into the text (after "ioccc 2011")
           // but we increment fontptr before dereferencing it because
           // gross, so just offset by 9 here.
           fontptr=progdata+9;

         // Read the next byte from the font table (stop if we
         // hit a terminating 0). 
         (fbyte_or_aacnt=*++fontptr) &&

           // Keep going as long as one of these holds:
           //
           //   - lo_or_found is zero (haven't found char)
           //   - fbyte_or_aacnt >= 40 (lower ones are space !"#$%&')
           //
           // This means that once lo_or_found is nonzero and we hit a
           // START token or a null zero, we are done.
           //
           // Since logical AND is short-circuiting, the update to
           // dot_out_or_tmp only happens if found is true and the
           // current character is greater than or equal to 40.
           //
           // Once that happens, we exit the loop because all three
           // conditions inside these parens are true, making the
           // entire thing false when NOT-ed.
           !(lo_or_found && fbyte_or_aacnt<forty &&
             (dot_out_or_tmp=text_width+=angle_or_width));

         // See if we have found our glyph yet
         lo_or_found ?

           //////////////////////////////////////////////////
           // We have found it -- we are in the current glyph.

           // The current byte should hold an OPCODE and an ARGUMENT.
           // Stash the OPCODE into lo_or_found and increment
           // fontptr. The ARGUMENT should be available by inspecting
           // fbyte_or_aacnt which still holds the byte that fontptr
           // was pointing to.
           lo_or_found=*fontptr++/32,

           // Now get the XCOORD (bits 3-4) and YCOORD (bits 0-2) from the
           // second byte of the stroke instructions. The XCOORD gets
           // added to the x-accumulator (dot_out_or_tmp) and then the
           // vector (XCOORD, YCOORD, 0) is pushed into the
           // vecop_buffer.  This is either the start of a line
           // segment or the center of an arc.
           buffer_offset++[vecop_buffer] =
              make_vec3(dot_out_or_tmp+=*fontptr/8&3,*fontptr&7,0),

           // Time to push the second vector into the vecop_buffer.
           // In the case of an arc (OPCODE == 3), this is starting
           // angle and range, or in the case of a line segment
           // (OPCODE == 2 or OPCODE == 1) this is dx, dy. We need to mirror
           // the x-coordinate if OPCODE == 1.
           //
           // In any event, all of this information is hanging out in
           // the ARGUMENT, which is the lower 5 bits of
           // fbyte_or_aacnt (first byte of stroke instruction pair).
           //
           // Here we also update buffer_length here to be used later
           // in dist_to_scene.
           vecop_buffer[buffer_length=buffer_offset++] =
             make_vec3((fbyte_or_aacnt/8&3)*(lo_or_found<2?minus_one:1),
                       (fbyte_or_aacnt&7)+1e-4,
                       lo_or_found>2),

           // Just a NOP because the ternary operator we're inside of
           // here is of type int.
           1

           :

           //////////////////////////////////////////////////
           // Glyph not found yet.

           // Try to update our found variable...
           (lo_or_found =

            //////////////////////////////////////////////////////////////////////
            // Update cur glyph according to current font table byte.
            
            (range_or_curglyph =

             // Subtract 40 from current font table byte. Less than zero?
             (fbyte_or_aacnt-=forty) < 0 ?

             //////////////////////////////////////////////////
             // Yes, less than zero, so this is a START token.
             
             // Glyph width given by byte - 34 = byte - 40 + 6
             angle_or_width=fbyte_or_aacnt+6,
             
             // Increment cur glyph and mark state as started.
             hi_or_started=range_or_curglyph+1

             :

             //////////////////////////////////////////////////
             // Was font table byte nonzero after subtracting 40?
             fbyte_or_aacnt ?

             // Yes, nonzero. 
             ( // Now see if we have started seeing font table
               // bytes yet, or if we are still going thru the
               // PPM header.
               hi_or_started?
               // Yes, we have started, so NOP.
               0
               :
               // Not started, so emit the current fbyte_or_aacnt (to
               // generate PPM header)
               output(fbyte_or_aacnt),
               // Comma says ignore results of previous ternary
               // operator and leave range_or_curglyph unchanged
               range_or_curglyph
               )

             :

             //////////////////////////////////////////////////
             // Font table byte was 40 (SETCUR token), so set
             // current glyph to next byte in font table.
             *++fontptr)

            //////////////////////////////////////////////////////////////////////
            // Compare the newly-updated cur glyph to...

            ==

            // The current text character, OR'ed with 32 to put in
            // range of 32-63 or 96-127 (forces lowercase), and XOR'ed
            // with mask to deobfuscate "ioccc 2011" from progdata.
            ((*textptr|32)^xormask_or_quality)

            &&

            ////////////////////////////////////////
            // Need to clear found bit whenever we hit a SETCUR token.
            1[fontptr]-forty)


         ); // Empty for loop

  //////////////////////////////////////////////////
  // Step 2: Generate the dang image.
  //
  // All of the techniques here are based upon the PDF presentation at
  // http://iquilezles.org/www/material/nvscene2008/nvscene2008.htm
  //
  // Iterate over image rows. Note xormask_or_quality gets value 0 in
  // preview mode, and 3 in high-quality mode.
  for (xormask_or_quality=3*(argc_or_image_col<3); ++image_row<110; )

    // Iterate over image columns. 
    for (argc_or_image_col=-301;

         // Initialize the pixel color to zero, 600 cols total
         pix_color=zeros, ++argc_or_image_col<300;

         // Output pixel after each iteration.
         output(pix_color.c),output(pix_color.a),output(pix_color.t))

      // Iterate over AA samples: either 1 (preview) or 4 (high-quality).
      for (fbyte_or_aacnt=minus_one; ++fbyte_or_aacnt<=xormask_or_quality;)

        // Shade this sample. This for loop iterates over the initial
        // ray as well as reflection rays (in high quality mode).
        
        for (// Start marching at the shared ray origin 
             march_point=make_vec3(-4,4.6,29),

               // Starting direction is a function of image row/column
               // and AA sample number.
               ray_dir=normalize(
                 add_scaled(
                   add_scaled(
                     add_scaled(zeros, normalize(make_vec3(5,0,2)),
                                argc_or_image_col + argc_or_image_col +
                                fbyte_or_aacnt/2 ), 
                     normalize(make_vec3(2,-73,0)),
                     image_row+image_row+fbyte_or_aacnt%2),
                   make_vec3(30.75,-6,-75),
                   20) ),

               // The initial ray contribution is 255 for preview mode
               // or 63 for each AA sample in high-quality mode
               // (adding 4 of them gets you 252 which is close
               // enough).
               //
               // Also, here bounces is initialized to 3 in
               // high-quality mode or 0 in preview mode.
               ray_contribution=hit=
                 255-(bounces=xormask_or_quality)*64;

             // The bounces variable acts as a counter for remaining
             // bounces; at the start, bounces is non-negative and hit
             // is non-zero so the loop always runs at least once. It
             // will stop when hit is 0 or hit is 1 and bounces is -1.
             hit*bounces+hit;

             // After each iteration (reflection), ray contribution
             // scales by 0.5.
             ray_contribution*=half)

          {
            
            // Perform the actual ray march using sphere tracing:
            for (// Initialize ray distance, current distance, and hit to 0
                 raydist_or_brightness=curdist_or_specular=hit=0;

                 // Keep going until hit or ray distance exceeds 94 units.
                 // Note ray distance always incremented by current.
                 !hit && 94 > (raydist_or_brightness+=
                               
                   // Obtain distance to scene at current point
                   curdist_or_specular=dist_to_scene(
                                                      
                     // Update current point by moving by current
                     // distance along ray direction
                     march_point = add_scaled(
                       march_point,
                       ray_dir,
                       curdist_or_specular)));

                 // After each ray update, set hit=1 if the current
                 // distance to the scene is less than 0.01
                 hit=curdist_or_specular<.01);

            // Done with ray marching!
            //
            // Now point is equal to march_point, closest_point holds
            // the closest point in the scene to the current ray
            // point, and objpoint_or_material holds the material
            // color of the closest object.

            // Now fake ambient occlusion loop (see iq's PDF for explanation):
            for (// Compute scene normal at intersection
                 normal = normalize(add_scaled(point,closest_point,minus_one)),
                   // This is actually included here to initialize the
                   // sky color below (gross).
                   dot_out_or_tmp = ray_dir.t*ray_dir.t,
                   // Also used below but initialized here.
                   sample_color = objpoint_or_material,
                   // Start at full brightness
                   raydist_or_brightness=1;

                 // 5 iterations if we hit something, 0 if not (saves
                 // wrapping for loop in if statement).
                 ++curdist_or_specular<6*hit;

                 // AO with exponential decay 
                 raydist_or_brightness -=
                   clamp(curdist_or_specular / 3 -
                     dist_to_scene(
                      add_scaled(march_point, normal, curdist_or_specular/3)))
                   / pow(2,curdist_or_specular));

            // AO has been computed, time to get the final color of
            // this ray sample. Note sample_color has been initialized
            // to the material of the closest primitive above.

            sample_color = hit ? // Did this ray hit?

              //////////////////////////////////////////////////
              // Yes, the ray hit.

              // Get the Blinn-Phong specular coefficient as dot
              // product between normal and halfway vector, raised to
              // high power.
              curdist_or_specular =
                pow(clamp(dot(normal,
                  // normalize halfway vector
                  normalize(
                    // create halfway vector
                    add_scaled(
                      // objpoint_or_material is now the light direction
                      objpoint_or_material=normalize(make_vec3(minus_one,1,2)),
                      ray_dir,
                      minus_one)))),
                  // raised to the 40th power
                  forty),

              // Mix in white color for specular
              pix_color = add_scaled(pix_color, ones,
                                     ray_contribution*curdist_or_specular),

              // Take the brightness computed during AO and modulate
              // it with diffuse and ambient light.
              raydist_or_brightness *=
                // Diffuse - objpoint_or_material is light direction
                clamp(dot(normal, objpoint_or_material))*half*twothirds +
                // Ambient
                twothirds,

              // Modulate ray_contribution after hit 
              ray_contribution *= bounces-- ? // Are there any bounces left?
              
                // Yes, there are bounces left, so this hit should
                // account for 2/3 of the remaining energy (the next
                // will account for the final 1/3). We need to remove
                // the additive component already taken by specular,
                // however.
                twothirds - twothirds * curdist_or_specular :

                // No, there are no bounces left, so just use up
                // all the energy not taken by specular.
                1-curdist_or_specular,

              // Now after all of that, we're actually going to leave
              // sample_color unchanged (i.e. whatever closest
              // primitive material color was).
              sample_color
              :

              //////////////////////////////////////////////////
              // Nope, ray missed. Remember when we initialized
              // dot_out_or_tmp to contain z^2 above? We now use that
              // to shade the sky, which gets white along the +/- z
              // axis, and blue elsewhere.
              make_vec3(dot_out_or_tmp, dot_out_or_tmp, 1);

            // Add the weighted sample_color into the pixel color. 
            pix_color = add_scaled(pix_color,
                                   sample_color,
                                   ray_contribution*raydist_or_brightness);

            // Pop out from the object a bit before starting to march
            // the reflection ray so we don't immediately detect the
            // same intersection that we're on.
            march_point = add_scaled(march_point,normal,.1);

            // Update the ray direction to be the reflection direction
            // using the usual calculation.
            ray_dir = add_scaled(ray_dir,normal,-2*dot(ray_dir,normal));

          }


  return 0;

}
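The reflection update near the end, ray_dir = add_scaled(ray_dir, normal, -2*dot(ray_dir, normal)), is the usual mirror-reflection formula

  r = d - 2 (d . n) n

for an incident direction d and unit surface normal n.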
Code example #15
File: parse_world.c  Project: Benjamin-L/qrender
void parse_world(state_t* state, FILE* f) {
	char line[4096];

	char** tokens;
	int n_tokens;

	int bsdf_index = 0;

	transform_data_t t;
	t.has_users = false;
	t.index = 0;
	matrix_t m_identity = {{1.f,0.f,0.f,0.f},
			       {0.f,1.f,0.f,0.f},
			       {0.f,0.f,1.f,0.f},
			       {0.f,0.f,0.f,1.f}};
	memcpy(t.t.m, m_identity, sizeof(matrix_t));
	memcpy(t.t.m_inv, m_identity, sizeof(matrix_t));

	state->transforms[t.index++] = t.t;
	state->n_transforms++;
	realloc_transforms();

	transform_data_t tstack[1024];
	int tstack_ptr = 0;
	
	while(fgets(line, 4096, f) != NULL) {
		if(line[0] != '#') {
			line[strlen(line)-1] = '\0';

			tokens = tokenize(line, &n_tokens);
			if(n_tokens != 0) {
				if(strcmp(tokens[0],"EndWorld") == 0) {
					free(tokens);
					return;
				}
				else if(strcmp(tokens[0],"PushTransform") == 0) {
					qr_assert(n_tokens==1, "parser", "PushTransform takes no arguments, found %d",n_tokens-1);
					tstack[++tstack_ptr] = t;
				}
				else if(strcmp(tokens[0],"PopTransform") == 0) {
					qr_assert(n_tokens==1, "parser", "PopTransform takes no arguments, found %d",n_tokens-1);
					transform_data_t tmp = t;
					t = tstack[tstack_ptr--];
					t.has_users = t.has_users || tmp.has_users;
					t.index = state->n_transforms;
				}
				else if(strcmp(tokens[0],"LoadIdentity") == 0) {
					qr_assert(n_tokens==1, "parser", "LoadIdentity takes no arguments, found %d",n_tokens-1);
					if(t.has_users == true) {
						t.index = state->n_transforms;
						t.has_users = false;
					}
				}
				else if(strcmp(tokens[0],"Translate") == 0) {
					qr_assert(n_tokens==4, "parser", "Translate takes only 3 arguments, found %d",n_tokens-1);
					if(t.has_users == true) {
						t.index = state->n_transforms;
						t.has_users = false;
					}
					transform_t tmp;
					translate(make_vec3(to_float(tokens[1]),to_float(tokens[2]),to_float(tokens[3])), &tmp);
					mul_matrix(&tmp.m, &t.t.m, &t.t.m);
					mul_matrix(&tmp.m_inv, &t.t.m_inv, &t.t.m_inv);
				}
				else if(strcmp(tokens[0],"Scale") == 0) {
					qr_assert(n_tokens==4, "parser", "Scale takes only 3 arguments, found %d",n_tokens-1);
					if(t.has_users == true) {
						t.index = state->n_transforms;
						t.has_users = false;
					}
					transform_t tmp;
					scale(make_vec3(to_float(tokens[1]),to_float(tokens[2]),to_float(tokens[3])), &tmp);
					mul_matrix(&tmp.m, &t.t.m, &t.t.m);
				}
				else if(strcmp(tokens[0],"LookAt") == 0) {
					qr_assert(n_tokens==7, "parser", "LookAt takes only 6 arguments, found %d",n_tokens-1);
					if(t.has_users == true) {
						t.index = state->n_transforms;
						t.has_users = false;
					}
					lookat(make_vec3(to_float(tokens[1]),to_float(tokens[2]),to_float(tokens[3])), make_vec3(to_float(tokens[4]),to_float(tokens[5]),to_float(tokens[6])), &t.t);
				}
				else if(strcmp(tokens[0],"BSDFDiffuse") == 0) {
					qr_assert(n_tokens==4, "parser", "BSDFDiffuse takes only 3 arguments, found %d",n_tokens-1);
					state->n_bsdfs++;
					realloc_bsdfs();
					state->bsdfs[state->n_bsdfs-1] = make_bsdf_diffuse(make_vec3(to_float(tokens[1]), to_float(tokens[2]), to_float(tokens[3])));
					bsdf_index = state->n_bsdfs-1;
				}
				else if(strcmp(tokens[0],"Sphere") == 0) {
					qr_assert(n_tokens==2, "parser", "Sphere takes only one argument, found %d",n_tokens-1);
					state->primitives = (primitive_t*)realloc(state->primitives, sizeof(primitive_t)*(++state->n_primitives));
					primitive_t p;
					p.type = PRIMITIVE_SPHERE;
					p.data = make_primitive_sphere(to_float(tokens[1]));
					p.t = t.index;
					p.bsdf = bsdf_index;
					t.has_users = true;
					state->primitives[state->n_primitives-1] = p;

					state->transforms[t.index] = t.t;
					state->n_transforms++;
					realloc_transforms();
				}
				else if(strcmp(tokens[0],"Mesh") == 0) {
					qr_assert(n_tokens==2, "parser", "Mesh takes only one argument, found %d",n_tokens-1);
					state->transforms[t.index] = t.t;
					t.has_users = true;
					state->n_transforms++;
					realloc_transforms();
					load_mesh(tokens[1], state, t, bsdf_index, NULL);
				}
				else if(strcmp(tokens[0],"MeshMat") == 0) {
					qr_assert(n_tokens==3, "parser", "MeshMat takes only 2 arguments, found %d",n_tokens-1);
					t.has_users = true;
					state->transforms[t.index] = t.t;
					state->n_transforms++;
					realloc_transforms();
					load_mesh(tokens[1], state, t, bsdf_index, tokens[2]);
				}
				else if(strcmp(tokens[0],"PointLight") == 0) {
					qr_assert(n_tokens==7, "parser", "PointLight takes only 6 arguments, found %d",n_tokens-1);
					realloc_lights();
					state->lights[state->n_lights++] = make_light_point(make_vec3(to_float(tokens[1]),to_float(tokens[2]),to_float(tokens[3])), make_vec3(to_float(tokens[4]),to_float(tokens[5]),to_float(tokens[6])));
				}
				else if(strcmp(tokens[0],"SphereLight") == 0) {
					qr_assert(n_tokens==8, "parser", "SphereLight takes only 7 arguments, found %d",n_tokens-1);
					realloc_lights();
					state->lights[state->n_lights++] = make_light_sphere(make_vec3(to_float(tokens[1]),to_float(tokens[2]),to_float(tokens[3])), to_float(tokens[4]), make_vec3(to_float(tokens[5]),to_float(tokens[6]),to_float(tokens[7])));
				}
				else if(strcmp(tokens[0],"Camera") == 0) {
					qr_assert(n_tokens==2, "parser", "Camera takes only one argument, found %d",n_tokens-1);
					state->camera_fplane = to_float(tokens[1]);
					state->camera_transform = t.index;
					t.has_users = true;
					state->camera_origin = transform_point(make_vec3(0.f,0.f,0.f), t.t, true);

					state->transforms[t.index] = t.t;
					state->n_transforms++;
					realloc_transforms();
				}
				else {
					ERROR("parser", "Unknown directive: %s",tokens[0]);
				}
			}
		}
	}
	ERROR("parser", "File ended in the middle of a World section");
}
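For orientation, here is a hypothetical scene fragment consistent with the directives this parser accepts (the exact syntax of the real scene files may differ); lines starting with '#' are comments, argument counts follow the qr_assert checks above, and the section ends with EndWorld:

# a red diffuse sphere of radius 1, lifted one unit up
BSDFDiffuse 0.8 0.2 0.2
PushTransform
Translate 0 1 0
Sphere 1.0
PopTransform

# a white point light and the camera
PointLight 5 5 5 1 1 1
LookAt 0 2 10 0 0 0
Camera 1.0
EndWorld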
Code example #16
File: mesh.cpp  Project: PierreCAMILLI/mif23-projet
void vertex_normal( Mesh& m, const Vector& n )
{
    vertex_normal(m, make_vec3(n.x, n.y, n.z));
}
Code example #17
int draw( void )
{
    
    if(wireframe)
    {
        glClearColor(1, 1, 1, 1);
        glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
        glLineWidth(2);
    }
    else
    {
        glClearColor(0.2f, 0.2f, 0.2f, 1);
        glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
    }
    
    // clear the image
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    
    if(key_state('r'))
    {
        clear_key_state('r');
        reload_program();
    }
    
    // get the mouse movement
    int mx, my;
    unsigned int mb= SDL_GetRelativeMouseState(&mx, &my);
    int mousex, mousey;
    SDL_GetMouseState(&mousex, &mousey);
    
    // move the camera
    if(mb & SDL_BUTTON(1))
        orbiter_rotation(camera, mx, my);      // rotate around the object
    else if(mb & SDL_BUTTON(2))
        orbiter_translation(camera, (float) mx / (float) window_width(), (float) my / (float) window_height()); // move the rotation point
    else if(mb & SDL_BUTTON(3))
        orbiter_move(camera, mx);           // move closer to / away from the object
    
    // get the transforms
    Transform model= make_identity();
    Transform view= orbiter_view_transform(camera);
    Transform projection= orbiter_projection_transform(camera, window_width(), window_height(), 45);
    Transform viewport= make_viewport(window_width(), window_height());
    
    Transform mvp= projection * view * model;
    Transform mvpInv= make_inverse(mvp);
    Transform mv= model * view;
    
    // draw the object
    if(program_failed == false)
    {
        if(key_state('w'))
        {
            clear_key_state('w');
            wireframe= !wireframe;
        }
        
        // minimal pipeline configuration
        glBindVertexArray(vao);
        glUseProgram(program);
        
        // set values for the uniforms
        // standard transforms
        program_uniform(program, "modelMatrix", model);
        program_uniform(program, "modelInvMatrix", make_inverse(model));
        program_uniform(program, "viewMatrix", view);
        program_uniform(program, "viewInvMatrix", make_inverse(view));
        program_uniform(program, "projectionMatrix", projection);
        program_uniform(program, "projectionInvMatrix", make_inverse(projection));
        program_uniform(program, "viewportMatrix", viewport);
        program_uniform(program, "viewportInvMatrix", make_inverse(viewport));
        
        program_uniform(program, "mvpMatrix", mvp);
        program_uniform(program, "mvpInvMatrix", mvpInv);
        
        program_uniform(program, "mvMatrix", mv);
        program_uniform(program, "normalMatrix", make_normal_transform(mv));
        
        // interactions
        program_uniform(program, "viewport", make_vec2(window_width(), window_height()));
        program_uniform(program, "time", (float) SDL_GetTicks());
        program_uniform(program, "motion", make_vec3(mx, my, mb & SDL_BUTTON(1)));
        program_uniform(program, "mouse", make_vec3(mousex, mousey, mb & SDL_BUTTON(1)));
        
        // textures
        for(unsigned int i= 0; i < (unsigned int) textures.size(); i++)
        {
            char uniform[1024];
            sprintf(uniform, "texture%d", i);
            program_use_texture(program, uniform, i, textures[i]);
        }
        
        // go
        glDrawArrays(GL_TRIANGLES, 0, vertex_count);
    }
    
    // display the info
    begin(widgets);
    if(program_failed)
    {
        label(widgets, "[error] program '%s'", program_filename.path);
        begin_line(widgets);
        text_area(widgets, 20, program_log.c_str(), program_area);
    }
    else
    {
        label(widgets, "program '%s' running...", program_filename.path);
        if(mesh_filename[0] != 0)
        {
            begin_line(widgets);
            label(widgets, "mesh '%s', %u positions, %u texcoords, %u normals", mesh_filename.path, 
                (unsigned int) mesh.positions.size(),
                (unsigned int) mesh.texcoords.size(),
                (unsigned int) mesh.normals.size());
        }
        for(unsigned int i= 0; i < (unsigned int) texture_filenames.size(); i++)
        {
            begin_line(widgets);
            label(widgets, "texture%u '%s'", i, texture_filenames[i].path);
        }
    }
    end(widgets);
    
    draw(widgets, window_width(), window_height());
    
    
    if(key_state('s'))
    {
        clear_key_state('s');
        screenshot("shader_kit.png");
    }
    
    if(key_state('c'))
    {
        clear_key_state('c');
        write_orbiter(camera, "orbiter.txt");
    }
    if(key_state('v'))
    {
        clear_key_state('v');
        camera= read_orbiter("orbiter.txt");
    }
    
    return 1;
}
Code example #18
File: mesh.cpp  Project: PierreCAMILLI/mif23-projet
unsigned int push_vertex( Mesh& m, const float x, const float y, const float z )
{
    return push_vertex(m, make_vec3(x, y, z));
}
Code example #19
File: mesh.cpp  Project: PierreCAMILLI/mif23-projet
unsigned int push_vertex( Mesh& m, const Point& p )
{
    return push_vertex(m, make_vec3(p.x, p.y, p.z));
}
Code example #20
File: mesh.cpp  Project: PierreCAMILLI/mif23-projet
void vertex_color( Mesh& m, const Color& color )
{
    vertex_color(m, make_vec3(color.r, color.g, color.b));
}