Example 1
  void LightPoint::calcMatWorld(Float4x4& ret, const float model_radius, 
    const float desired_radius) const {
    // Rebuild the world matrix from scratch: start from identity, then
    // apply a uniform scale and the light's world-space translation.
    ret.identity();

    float scale = desired_radius / model_radius;
    ret.m[0] = scale;   // (0,0)
    ret.m[5] = scale;   // (1,1)
    ret.m[10] = scale;  // (2,2)

    // Now offset by the light's position
#ifdef COLUMN_MAJOR
    ret.m[12] = pos_world_[0];
    ret.m[13] = pos_world_[1];
    ret.m[14] = pos_world_[2];
#else
    ret.m[3] = pos_world_[0];
    ret.m[7] = pos_world_[1];
    ret.m[11] = pos_world_[2];
#endif
  }
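
For reference, a minimal standalone sketch of the same scale-then-translate composition, assuming a 16-float column-major matrix layout like the COLUMN_MAJOR branch above. The helper name buildWorld and its parameters are hypothetical, not part of the original class:

#include <cstring>

// Hypothetical helper mirroring calcMatWorld: uniform scale on the
// diagonal, translation in elements 12..14 (column-major layout).
static void buildWorld(float out[16], float model_radius,
                       float desired_radius, const float pos[3]) {
  std::memset(out, 0, sizeof(float) * 16);
  const float scale = desired_radius / model_radius;
  out[0] = out[5] = out[10] = scale;  // (0,0), (1,1), (2,2)
  out[15] = 1.0f;                     // (3,3)
  out[12] = pos[0];                   // translation column
  out[13] = pos[1];
  out[14] = pos[2];
}

In a row-major layout the same translation would instead land in elements 3, 7 and 11, which is exactly what the #else branch of calcMatWorld handles.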
Example 2
void renderFrame(float dt) {
  if (rotate_light) {
    renderer::LightDir* light = render->light_dir();
    Float3* dir = light->direction_world();
    Float4x4::rotateMatYAxis(mat_tmp, dt);
    Float3 new_dir;
    Float3::affineTransformVec(new_dir, mat_tmp, *dir);
    dir->set(new_dir);
  }

  // Move the camera
  delta_pos.set(cur_dir);
  const Float3 zeros(0, 0, 0);
  if (!Float3::equal(delta_pos, zeros)) {
    delta_pos.normalize();
    Float3::scale(delta_pos, camera_speed * dt);
    if (running) {
      Float3::scale(delta_pos, camera_run_mulitiplier);
    }
    render->camera()->moveCamera(&delta_pos);
  }

  // Pick which hand coefficients drive the models this frame.
  float* cur_coeff = NULL;
  if (fit_right && fit_left) {
    cur_coeff = l_hand_coeffs[cur_image]->coeff();
    models[0]->updateMatrices(l_hand_coeffs[cur_image]->coeff());
    models[1]->updateMatrices(r_hand_coeffs[cur_image]->coeff());
  } else if (fit_right) {
    cur_coeff = r_hand_coeffs[cur_image]->coeff();
    models[0]->updateMatrices(r_hand_coeffs[cur_image]->coeff());
  } else if (fit_left) {
    cur_coeff = l_hand_coeffs[cur_image]->coeff();
    models[0]->updateMatrices(l_hand_coeffs[cur_image]->coeff());
  }
  HandGeometryMesh::setCurrentStaticHandProperties(cur_coeff);

  // Now render the final frame
  Float4x4 identity;
  identity.identity();
  switch (render_output) {
  case 1:
    render->renderFrame(dt);
    if (render_depth) {
      for (uint32_t k = 0; k < std::min<uint32_t>(MAX_KINECTS, 
        num_point_clouds_to_render); k++) {
        render->renderColoredPointCloud(geometry_points[k], 
          &camera_view[k], 
          point_cloud_scale * 1.5f * static_cast<float>(settings.width) / 4.0f);
      }
    }
    break;
  case 2: {
    // Pack the active hand coefficients into one flat array for the renderer.
    float coeff[num_models * num_coeff_fit];
    if (fit_left && !fit_right) {
      memcpy(coeff, l_hand_coeffs[cur_image]->coeff(), sizeof(coeff[0])*num_coeff_fit);
    } else if (!fit_left && fit_right) {
      memcpy(coeff, r_hand_coeffs[cur_image]->coeff(), sizeof(coeff[0])*num_coeff_fit);
    } else {
      memcpy(coeff, l_hand_coeffs[cur_image]->coeff(), sizeof(coeff[0])*num_coeff_fit);
      memcpy(&coeff[num_coeff_fit], r_hand_coeffs[cur_image]->coeff(), 
        sizeof(coeff[0])*num_coeff_fit);
    }

    fit->model_renderer()->drawDepthMap(coeff, num_coeff_fit, models,
      num_models, cur_kinect, false);
    fit->model_renderer()->visualizeDepthMap(wnd, cur_kinect);
    break;
  }
  default:
    throw runtime_error("ERROR: render_output is an incorrect value");
  }
  wnd->swapBackBuffer();
}
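
For context, a minimal sketch of what the light-rotation step at the top of renderFrame amounts to, assuming Float4x4::rotateMatYAxis builds a standard rotation about the world Y axis. The helper name rotateDirY and the chosen handedness are assumptions, not part of the original code:

#include <cmath>

// Hypothetical standalone version of the per-frame light rotation:
// rotate a direction vector about the world Y axis by `angle` radians.
// Convention (right-handed, counter-clockwise when looking down +Y) is
// an assumption; Float4x4::rotateMatYAxis may use a different sign.
static void rotateDirY(float dir[3], float angle) {
  const float c = std::cos(angle);
  const float s = std::sin(angle);
  const float x = dir[0];
  const float z = dir[2];
  dir[0] = c * x + s * z;   // the y component is unchanged
  dir[2] = -s * x + c * z;
}

Feeding dt as the angle, as the original loop does, gives a rotation rate of one radian per second regardless of frame rate, which matches how the camera translation is also scaled by camera_speed * dt.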