void Compound::addGeometry(const Pose3<>& parentPose, Geometry& geometry, SimRobotCore2::CollisionCallback* callback)
{
  // compute pose
  Pose3<> geomPose = parentPose;
  if(geometry.translation)
    geomPose.translate(*geometry.translation);
  if(geometry.rotation)
    geomPose.rotate(*geometry.rotation);

  // create geometry
  dGeomID geom = geometry.createGeometry(Simulation::simulation->staticSpace);
  if(geom)
  {
    dGeomSetData(geom, &geometry);

    // set pose
    dGeomSetPosition(geom, geomPose.translation.x, geomPose.translation.y, geomPose.translation.z);
    dMatrix3 matrix3;
    ODETools::convertMatrix(geomPose.rotation, matrix3);
    dGeomSetRotation(geom, matrix3);
  }

  // handle nested geometries
  for(std::list< ::PhysicalObject*>::const_iterator iter = geometry.physicalDrawings.begin(), end = geometry.physicalDrawings.end(); iter != end; ++iter)
  {
    Geometry* geometry = dynamic_cast<Geometry*>(*iter);
    if(geometry)
      addGeometry(geomPose, *geometry, callback);
  }
}
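// A minimal usage sketch (assumption): the recursion above expects a caller to walk its own
// top-level drawings and pass each Geometry together with the parent's world pose. The member
// names pose and physicalDrawings are taken from the snippet above; the helper itself and its
// place in the Compound setup are hypothetical.
void Compound::createGeometries(SimRobotCore2::CollisionCallback* callback)
{
  for(std::list< ::PhysicalObject*>::const_iterator iter = physicalDrawings.begin(), end = physicalDrawings.end(); iter != end; ++iter)
  {
    Geometry* geometry = dynamic_cast<Geometry*>(*iter);
    if(geometry)
      addGeometry(pose, *geometry, callback); // nested geometries are handled by the recursion
  }
}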
Example #2
void Camera::CameraSensor::updateValue()
{
  // allocate buffer
  const unsigned int imageWidth = camera->imageWidth;
  const unsigned int imageHeight = camera->imageHeight;
  const unsigned int imageSize = imageWidth * imageHeight * 3;
  if(imageBufferSize < imageSize)
  {
    if(imageBuffer)
      delete[] imageBuffer;
    imageBuffer = new unsigned char[imageSize];
    imageBufferSize = imageSize;
  }

  // make sure the poses of all movable objects are up to date
  Simulation::simulation->scene->updateTransformations();

  // prepare offscreen renderer
  OffscreenRenderer& renderer = Simulation::simulation->renderer;
  renderer.makeCurrent(imageWidth, imageHeight);

  // setup image size and angle of view
  glViewport(0, 0, imageWidth, imageHeight);
  glMatrixMode(GL_PROJECTION);
  glLoadMatrixf(projection);
  glMatrixMode(GL_MODELVIEW);
  
  // enable lighting, textures, and smooth shading
  glEnable(GL_LIGHTING);
  glEnable(GL_TEXTURE_2D);
  glPolygonMode(GL_FRONT, GL_FILL);
  glShadeModel(GL_SMOOTH);

  // setup camera position
  Pose3<> pose = physicalObject->pose;
  pose.conc(offset);
  // align OpenGL's camera frame (looking along -z, y up) with the sensor frame (looking along its x axis, z up)
  static const Matrix3x3<> cameraRotation(Vector3<>(0.f, -1.f, 0.f), Vector3<>(0.f, 0.f, 1.f), Vector3<>(-1.f, 0.f, 0.f));
  pose.rotate(cameraRotation);
  float transformation[16];
  OpenGLTools::convertTransformation(pose.invert(), transformation);
  glLoadMatrixf(transformation);
  
  // draw all objects
  glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
  Simulation::simulation->scene->drawAppearances();

  // read frame buffer
  renderer.finishImageRendering(imageBuffer, imageWidth, imageHeight);
  data.byteArray = imageBuffer;
}
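// Sketch (assumption): how a consumer might address a single pixel in the image produced above.
// imageSize = imageWidth * imageHeight * 3 implies three bytes per pixel (R, G, B); whether rows
// are stored top-down or in OpenGL's bottom-up order depends on finishImageRendering and is not
// visible here.
inline const unsigned char* pixelAt(const unsigned char* byteArray, unsigned int imageWidth,
                                    unsigned int x, unsigned int y)
{
  return byteArray + (y * imageWidth + x) * 3; // points at the first of the three color bytes
}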
void DepthImageSensor::DistanceSensor::updateValue()
{
  // make sure the poses of all movable objects are up to date
  Simulation::simulation->scene->updateTransformations();
  
  OffscreenRenderer& renderer = Simulation::simulation->renderer;

  renderer.makeCurrent(renderWidth, renderHeight);
  glViewport(0, 0, renderWidth, renderHeight);

  // setup image size and angle of view
  glMatrixMode(GL_PROJECTION);
  glLoadMatrixf(projection);
  glMatrixMode(GL_MODELVIEW);
  
  // disable lighting and textures, and use flat shading
  glDisable(GL_LIGHTING);
  glDisable(GL_TEXTURE_2D);
  glPolygonMode(GL_FRONT, GL_FILL);
  glShadeModel(GL_FLAT);

  // setup camera position
  Pose3<> pose = physicalObject->pose;
  pose.conc(offset);
  static const Matrix3x3<> cameraRotation(Vector3<>(0.f, -1.f, 0.f), Vector3<>(0.f, 0.f, 1.f), Vector3<>(-1.f, 0.f, 0.f));
  pose.rotate(cameraRotation);
  pose.rotate(Matrix3x3<>(Vector3<>(0, (depthImageSensor->angleX - renderAngleX) / 2.0f, 0)));
  
  float* val = imageBuffer;
  unsigned int widthLeft = depthImageSensor->imageWidth;
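  // The depth image may be rendered in several passes (numOfBuffers): each pass fills renderBuffer
  // with a renderWidth x renderHeight depth image, the values are converted to distances below,
  // and in the multi-pass (spherical) case the camera is turned by renderAngleX between passes
  // (see the rotation at the end of the loop body).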
  for(unsigned int i = 0; i < numOfBuffers; ++i)
  {
    float transformation[16];
    OpenGLTools::convertTransformation(pose.invert(), transformation);
    glLoadMatrixf(transformation);

    // disable color rendering
    glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);
    
    // draw all objects
    glClear(GL_DEPTH_BUFFER_BIT);
    Simulation::simulation->scene->drawAppearances();

    // enable color rendering again
    glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);

    // read frame buffer
    renderer.finishDepthRendering(renderBuffer, renderWidth, renderHeight);

    if(depthImageSensor->projection == perspectiveProjection)
    {
      // convert pixels to points in world and compute the depth (renderBuffer == imageBuffer)
      const float halfP34 = projection[14] * 0.5f;
      const float halfP33m1 = projection[10] * 0.5f - 0.5f;
      for(float* end = val + renderWidth * renderHeight; val < end; ++val)
        *val = halfP34 / (*val + halfP33m1);
    }
    else
    {
      // convert pixels to points in world and compute the distances (renderBuffer != imageBuffer)
      const float fInvSqr = 1.f / (projection[0] * projection[0]);
      const float halfP34 = projection[14] * 0.5f;
      const float halfP33m1 = projection[10] * 0.5f - 0.5f;
      float* const mid = lut[bufferWidth / 2];
      const float factor = 2.0f / float(renderWidth);
      const unsigned int end = std::min(bufferWidth, widthLeft);
      for(unsigned int x = 0; x < end; ++x)
      {
        const float vx = (lut[x] - mid) * factor;
        *val++ = std::min<float>(halfP34 / (*lut[x] + halfP33m1) * sqrt(1.f + vx * vx * fInvSqr), max);
      }
      widthLeft -= end;
      pose.rotate(Matrix3x3<>(Vector3<>(0, -renderAngleX, 0)));
    }
  }
}
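// Sketch (assumption): why halfP34 / (depth + halfP33m1) recovers the distance along the viewing
// axis. With the standard OpenGL perspective matrix P (column-major) and the default depth range,
// the depth buffer stores d = (z_ndc + 1) / 2 with z_ndc = (P[10] * z_eye + P[14]) / -z_eye.
// Solving for the positive eye-space distance -z_eye gives
//   -z_eye = (P[14] / 2) / (d + P[10] / 2 - 1/2),
// i.e. exactly the constants halfP34 = projection[14] * 0.5f and
// halfP33m1 = projection[10] * 0.5f - 0.5f used above.
inline float depthToEyeDistance(float depth, const float* projection)
{
  const float halfP34 = projection[14] * 0.5f;
  const float halfP33m1 = projection[10] * 0.5f - 0.5f;
  return halfP34 / (depth + halfP33m1);
}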
Example #4
bool Camera::CameraSensor::renderCameraImages(SimRobotCore2::Sensor** cameras, unsigned int count)
{
  if(lastSimulationStep == Simulation::simulation->simulationStep)
    return true;

  // allocate buffer
  const unsigned int imageWidth = camera->imageWidth;
  const unsigned int imageHeight = camera->imageHeight;
  const unsigned int imageSize = imageWidth * imageHeight * 3;
  const unsigned int multiImageBufferSize = imageSize * count;
  if(imageBufferSize < multiImageBufferSize)
  {
    if(imageBuffer)
      delete[] imageBuffer;
    imageBuffer = new unsigned char[multiImageBufferSize];
    imageBufferSize = multiImageBufferSize;
  }

  // make sure the poses of all movable objects are up to date
  Simulation::simulation->scene->updateTransformations();

  // prepare offscreen renderer
  OffscreenRenderer& renderer = Simulation::simulation->renderer;
  renderer.makeCurrent(imageWidth, imageHeight * count);

  // setup angle of view
  glMatrixMode(GL_PROJECTION);
  glLoadMatrixf(projection);
  glMatrixMode(GL_MODELVIEW);
  
  // enable lighting, textures, and smooth shading
  glEnable(GL_LIGHTING);
  glEnable(GL_TEXTURE_2D);
  glPolygonMode(GL_FRONT, GL_FILL);
  glShadeModel(GL_SMOOTH);

  // clear buffers
  glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

  // render images
  int currentVerticalPos = 0; // y offset of this camera's band in the stacked framebuffer
  unsigned char* currentBufferPos = imageBuffer;
  for(unsigned int i = 0; i < count; ++i)
  {
    CameraSensor* sensor = (CameraSensor*)cameras[i];
    ASSERT(sensor->lastSimulationStep != Simulation::simulation->simulationStep);

    glViewport(0, currentVerticalPos, imageWidth, imageHeight);

    // setup camera position
    Pose3<> pose = sensor->physicalObject->pose;
    pose.conc(sensor->offset);
    static const Matrix3x3<> cameraRotation(Vector3<>(0.f, -1.f, 0.f), Vector3<>(0.f, 0.f, 1.f), Vector3<>(-1.f, 0.f, 0.f));
    pose.rotate(cameraRotation);
    float transformation[16];
    OpenGLTools::convertTransformation(pose.invert(), transformation);
    glLoadMatrixf(transformation);
  
    // draw all objects
    Simulation::simulation->scene->drawAppearances();

    sensor->data.byteArray = currentBufferPos;
    sensor->lastSimulationStep = Simulation::simulation->simulationStep;

    currentVerticalPos += imageHeight;
    currentBufferPos += imageSize;
  }

  // read frame buffer
  renderer.finishImageRendering(imageBuffer, imageWidth, imageHeight * count);
  return true;
}
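// Usage sketch (assumption): renderCameraImages() renders several cameras of the same resolution
// in one offscreen pass, stacking the images vertically in a single framebuffer; afterwards each
// sensor's data.byteArray points into the shared imageBuffer. A caller holding two such sensors
// might use it like this; the helper and the way the sensor pointers were obtained are hypothetical.
static void renderBothCameras(Camera::CameraSensor* upper, Camera::CameraSensor* lower)
{
  SimRobotCore2::Sensor* cameras[2] = {upper, lower};
  // One call renders both images; the lastSimulationStep guard makes a second call within the
  // same simulation step return immediately.
  upper->renderCameraImages(cameras, 2);
}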