Example #1
MatrixF PlaneReflector::getCameraReflection( const MatrixF &camTrans )
{
   Point3F normal = refplane;   // PlaneF extends Point3F; its xyz part is the plane's unit normal

   // Mirror the camera position across the plane: move it back along the
   // plane normal by twice its signed distance to the plane.
   Point3F camPos = camTrans.getPosition();
   F32 dist = refplane.distToPlane( camPos );
   Point3F newCamPos = camPos - normal * dist * 2.0f;

   // Figure out new look direction
   Point3F i, j, k;
   camTrans.getColumn( 0, &i );
   camTrans.getColumn( 1, &j );
   camTrans.getColumn( 2, &k );

   i = MathUtils::reflect( i, normal );
   j = MathUtils::reflect( j, normal );
   k = MathUtils::reflect( k, normal );
   //mCross( i, j, &k );   // alternative: derive k from i x j to force a right-handed basis

   MatrixF newTrans(true);   // start from an identity transform
   newTrans.setColumn( 0, i );
   newTrans.setColumn( 1, j );
   newTrans.setColumn( 2, k );

   newTrans.setPosition( newCamPos );

   return newTrans;
}
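For reference, MathUtils::reflect() above presumably applies the standard mirror formula v' = v - 2 * (v . n) * n for a unit plane normal n. The snippet below is a self-contained sketch of that rule (the Vec3 type and reflect helper are illustrative stand-ins, not Torque's API), mirroring a downward-looking view direction across a horizontal plane:

#include <cstdio>

// Minimal 3-component vector, only for this illustration.
struct Vec3 { float x, y, z; };

static float dot( const Vec3 &a, const Vec3 &b )
{
   return a.x * b.x + a.y * b.y + a.z * b.z;
}

// Mirror v across the plane with unit normal n: v' = v - 2 * (v . n) * n.
static Vec3 reflect( const Vec3 &v, const Vec3 &n )
{
   float d = 2.0f * dot( v, n );
   return Vec3{ v.x - d * n.x, v.y - d * n.y, v.z - d * n.z };
}

int main()
{
   Vec3 up{ 0.0f, 0.0f, 1.0f };          // ground-plane normal
   Vec3 look{ 0.0f, 0.707f, -0.707f };   // camera looking down at 45 degrees
   Vec3 r = reflect( look, up );         // expect (0, 0.707, 0.707): now looking up
   std::printf( "%g %g %g\n", r.x, r.y, r.z );
   return 0;
}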
Example #2
void SelfLocator::update(RobotPoseHypotheses& robotPoseHypotheses)
{
  robotPoseHypotheses.hypotheses.clear();
  //update only available for two types of pose calculation:
  if(poseCalculatorType != POSE_CALCULATOR_PARTICLE_HISTORY && poseCalculatorType != POSE_CALCULATOR_K_MEANS_CLUSTERING)
    return;
  //sample set needs to have been updated within this frame:
  if(lastPoseComputationTimeStamp != bb->theFrameInfo.time)
  {
    RobotPose dummyPose;
    update(dummyPose);
  }

  if(poseCalculatorType == POSE_CALCULATOR_PARTICLE_HISTORY)
  {
    //get hypotheses:
    PoseCalculatorParticleHistory< Sample, SampleSet<Sample> >* poseHistoryCalc
      = static_cast<PoseCalculatorParticleHistory< Sample, SampleSet<Sample> >*>(poseCalculator);
    vector< pair<int, int> > clusters = poseHistoryCalc->getClusters();
    sort(clusters.begin(), clusters.end(), cmpClusterPairs);
    const unsigned int MAX_HYPOTHESES = min<unsigned int>(robotPoseHypotheses.MAX_HYPOTHESES, clusters.size());
    for(unsigned int i = 0; i < MAX_HYPOTHESES; ++i)
    {
      // No mini clusters, please (clusters are sorted by size, so the remaining ones are no larger):
      if(clusters[i].second <= 3)
        break;
      // Compute average position:
      RobotPoseHypothesis newHypothesis;
      poseHistoryCalc->calcPoseOfCluster(newHypothesis, clusters[i].first);
      Vector2<int> newTrans(static_cast<int>(newHypothesis.translation.x), static_cast<int>(newHypothesis.translation.y));
      // Compute variance of position:
      int varianceX(0);
      int varianceY(0);
      for(int j = 0; j < samples->size(); ++j)
      {
        Sample& s(samples->at(j));
        if(s.cluster == clusters[i].first)
        {
          varianceX += (s.translation.x - newTrans.x) * (s.translation.x - newTrans.x);
          varianceY += (s.translation.y - newTrans.y) * (s.translation.y - newTrans.y);
        }
      }
      varianceX /= (clusters[i].second - 1);
      varianceY /= (clusters[i].second - 1);
      newHypothesis.positionCovariance[0][0] = static_cast<float>(varianceX);
      newHypothesis.positionCovariance[1][1] = static_cast<float>(varianceY);
      // Compute covariance:
      int cov_xy(0);
      for(int j = 0; j < samples->size(); ++j)
      {
        Sample& s(samples->at(j));
        if(s.cluster == clusters[i].first)
          cov_xy += (s.translation.x - newTrans.x) * (s.translation.y - newTrans.y);
      }
      cov_xy /= (clusters[i].second - 1);
      newHypothesis.positionCovariance[0][1] = newHypothesis.positionCovariance[1][0] = static_cast<float>(cov_xy);
      // Finally add to list:
      robotPoseHypotheses.hypotheses.push_back(newHypothesis);
    }
  }

  if(poseCalculatorType == POSE_CALCULATOR_K_MEANS_CLUSTERING)
  {
    //get hypotheses:
    PoseCalculatorKMeansClustering< Sample, SampleSet<Sample>, 5, 1000 >* poseKMeansCalc
      = static_cast<PoseCalculatorKMeansClustering< Sample, SampleSet<Sample>, 5, 1000 >*>(poseCalculator);
    for(unsigned int i = 0; i < 5; ++i)   // 5 == number of clusters, matching the template parameter above
    {
      RobotPoseHypothesis newHypothesis;
      newHypothesis.validity = poseKMeansCalc->getClusterValidity(i);
      if(newHypothesis.validity > 0)
      {
        poseKMeansCalc->getClusterPose(newHypothesis, i);
        newHypothesis.positionCovariance[0][0] = 0.1f;   //TODO: calculate covariances
        newHypothesis.positionCovariance[0][1] = 0.1f;
        newHypothesis.positionCovariance[1][0] = 0.1f;
        newHypothesis.positionCovariance[1][1] = 0.1f;
        robotPoseHypotheses.hypotheses.push_back(newHypothesis);
      }
    }
  }
}
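The variance and covariance loops above are the standard unbiased estimator: squared deviations from the cluster mean are accumulated in integers and divided by n - 1, then written into the 2x2 positionCovariance of the hypothesis. A self-contained sketch of the same arithmetic on a hypothetical cluster of four samples:

#include <cstdio>
#include <vector>

// Illustrative 2-D integer point; the real code uses Vector2<int> translations.
struct Pt { int x, y; };

int main()
{
  // Hypothetical cluster of samples scattered around roughly (100, 200).
  std::vector<Pt> cluster = { {  90, 195 }, { 110, 205 }, { 100, 190 }, { 100, 210 } };
  const int n = static_cast<int>( cluster.size() );

  // Mean of the cluster (the code above gets this from calcPoseOfCluster()).
  int meanX = 0, meanY = 0;
  for( const Pt &p : cluster ) { meanX += p.x; meanY += p.y; }
  meanX /= n;
  meanY /= n;

  // Unbiased sample (co)variances with the same n - 1 divisor as above.
  int varX = 0, varY = 0, covXY = 0;
  for( const Pt &p : cluster )
  {
    varX  += ( p.x - meanX ) * ( p.x - meanX );
    varY  += ( p.y - meanY ) * ( p.y - meanY );
    covXY += ( p.x - meanX ) * ( p.y - meanY );
  }
  varX  /= ( n - 1 );
  varY  /= ( n - 1 );
  covXY /= ( n - 1 );

  std::printf( "var x = %d, var y = %d, cov xy = %d\n", varX, varY, covXY );
  return 0;
}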
Example #3
void CalBone::blendState(float unrampedWeight, const CalVector& translation, 
                         const CalQuaternion& rotation, float scale,
                         bool replace, float rampValue, bool absoluteTranslation )
{

  // Attenuate the weight by the accumulated replacement attenuation.  Each applied
  // "replacement" animation attenuates the weights of the subsequent animations by
  // the inverse of its rampValue, so that when a replacement animation ramps up to
  // full, all lesser priority animations automatically ramp down to zero.
  float rampedWeight = unrampedWeight * rampValue;
  float attenuatedWeight = rampedWeight * m_accumulatedReplacementAttenuation;

  // It appears that quaternion::blend() only works with blend factors of 0-1, so
  // I'll clamp the scale to that range.
  if( scale < 0.0f ) {
    scale = 0.0f;
  }
  if( scale > 1.0f ) {
    scale = 1.0f;
  }

  // Now apply weighted, scaled transformation.  For weights, Cal starts with the
  // first and then blends the later ones in proportion to their weights.  Though this
  // would seem to depend on the order, you can reason by induction that it does not.
  // Each application of an animation gives it the correct proportion to the others in
  // aggregate and leaves intact the proportions among the others.
  if( m_accumulatedWeightAbsolute == 0.0f ) {

    // It is the first state, so we can just copy it into the bone state.  The first animation
    // must be applied with scale = 1.0 since it is the initial pose rather than something
    // to be blended onto a pose.  If we scale the first state, the skeleton will look like
    // a crumpled spider.
    m_accumulatedWeightAbsolute = attenuatedWeight;
    m_translationAbsolute = absoluteTranslation ? translation : m_translation + translation;
    m_rotationAbsolute = rotation;

    // I would like to scale this blend, but I cannot since it is the initial pose.  Thus I
    // will store away this scale and compensate appropriately on the second blend.  See below.
    // After applying blend2, the blend1 = 1 - blend2.  If I would like to scale blend1 to 30%
    // of its original scale, for example, then I would like,
    //
    //      ( 1 - blend2' ) = 0.3 * ( 1 - blend2 )
    // so,
    //      blend2' = 1 - 0.3 * ( 1 - blend2 )
    //
    // or similarly for any value of m_firstBlendScale instead of 30%.
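    //
    // Worked example with hypothetical numbers: if m_firstBlendScale were 0.3 and the
    // second blend would normally use blend2 = 0.5, the adjusted factor is
    //      blend2' = 1 - 0.3 * ( 1 - 0.5 ) = 0.85,
    // leaving the first state with 1 - 0.85 = 0.15, i.e. 30% of its unscaled 0.5 share.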
    m_firstBlendScale = scale;
  } else {

    // Consider an example with two animations, one or both of them "replace" animations.
    // Wave is a "replace" animation, played on top of Walk.  Wave is applied first since it is a 
    // "replace" animation and Walk is not.  Imagine Wave is ramping in, currently at 80%.  Wave sets
    // the initial pose 100% and then Walk is applied over that pose with a blend factor of 0.2.  The result
    // is that Wave is 80% and Walk is 20%, which is what you'd expect for replace semantics.
    //
    // Animation    RampedWeight  AttenuatedWeight    InAccumWeightAbs  OutAccAttenuation   Factor
    // Wave         0.8           0.8                 0.0               0.2 (replace)       n/a (100%)
    // Walk         1.0           0.2                 0.8               0.2 (not replace)   0.2/(0.8+0.2) = 0.2
    //
    // Consider the same example with two animations, but neither of them "replace" animations.
    // Assume Wave is applied first.  Imagine Wave is ramping in, currently at 80%.  Wave sets
    // the initial pose 100% and then Walk is applied over that pose with a blend factor of 0.55.  The result
    // is that Wave is 45% and Walk is 55%, which is about what you'd expect for non-replace semantics.
    //
    // Animation    RampedWeight  AttenuatedWeight    InAccumWeightAbs  OutAccAttenuation   Factor
    // Wave         0.8           0.8                 0.0               1.0 (not replace)   n/a (100%)
    // Walk         1.0           1.0                 0.8               1.0 (not replace)   1.0/(0.8+1.0) = 0.55
    //
    // Consider the same example again but reverse the order of Wave and Walk, so Walk is applied first.
    // As before, imagine Wave is ramping in, currently at 80%.  Walk sets the initial pose 100% 
    // and then Wave is applied over that pose with a blend factor of 0.44.  The result
    // is that Wave is 44% and Walk is 56%, which is also about what you'd expect for non-replace semantics.
    //
    // Animation    RampedWeight  AttenuatedWeight    InAccumWeightAbs  OutAccAttenuation   Factor
    // Walk         1.0           1.0                 0.0               1.0 (not replace)   n/a (100%)
    // Wave         0.8           0.8                 1.0               1.0 (not replace)   0.8/(0.8+1.0) = 0.44
    //
    // Now consider an example in which Point and Wave are both applied over Walk, with Point applied
    // first at highest priority.  Assume that Point is ramped at 90% and Wave is ramped at 80%.  Both
    // Point and Wave are "replace" animations.  Walk is not.  The result is Walk is 2%, Wave is about 8%,
    // and Point is about 90%, which seems like a reasonable result.
    //
    // Animation    RampedWeight  AttenuatedWeight    InAccumWeightAbs  OutAccAttenuation   Factor
    // Point        0.9           0.9                 0                 0.1 (replace)       n/a (100%)
    // Wave         0.8           0.08                0.9               0.02 (replace)      0.08/(0.9+0.08) = 0.082
    // Walk         1.0           0.02                0.98              0.02 (not replace)  0.02/(0.98+0.02) = 0.02
    //
    // Finally, consider an example in which Point and Wave are both applied over Walk, but in which
    // none of the animations is a "replace" animation.  For this example, assume that Point, Wave,
    // and Walk all are fully ramped in at 100%.  The result is Walk is 33%, Wave is about 33%,
    // and Point is about 33%, which seems like the right result.
    //
    // Animation    RampedWeight  AttenuatedWeight    InAccumWeightAbs  OutAccAttenuation   Factor
    // Point        1.0           1.0                 0.0               1.0 (not replace)   n/a (100%)
    // Wave         1.0           1.0                 1.0               1.0 (not replace)   1.0/(1.0+1.0) = 0.5
    // Walk         1.0           1.0                 2.0               1.0 (not replace)   1.0/(1.0+2.0) = 0.33
    float factor = scale * attenuatedWeight / ( m_accumulatedWeightAbsolute + attenuatedWeight );

    // If the scale of the first blend was not 1.0, then I will adjust the factor of the second blend
    // to compensate, 
    //
    //      factor' = 1 - m_firstBlendScale * ( 1 - factor )
    //
    assert( factor <= 1.0f );
    factor = 1.0f - m_firstBlendScale * ( 1.0f - factor );
    CalVector newTrans(absoluteTranslation ? translation : m_translation + translation);
    m_translationAbsolute.blend(factor, newTrans);
    m_rotationAbsolute.blend(factor, rotation);
    m_accumulatedWeightAbsolute += attenuatedWeight;
    m_firstBlendScale = 1.0f;
  }
  if( replace ) {
    m_accumulatedReplacementAttenuation *= ( 1.0f - rampValue );
  }
}
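The order-independence argued in the comments can be checked with a toy scalar version of the same scheme: each new state is mixed in with factor w_i / (accumulatedWeight + w_i), which by induction reproduces a single weighted average. The values and weights below are hypothetical, and a plain lerp stands in for CalVector/CalQuaternion blend():

#include <cstdio>

int main()
{
  const float value[3]  = { 10.0f, 20.0f, 40.0f };  // hypothetical per-animation "poses"
  const float weight[3] = {  0.9f,  0.8f,  1.0f };  // hypothetical attenuated weights

  float accumulated = 0.0f;   // plays the role of m_accumulatedWeightAbsolute
  float blended     = 0.0f;   // plays the role of m_translationAbsolute, reduced to a scalar

  for( int i = 0; i < 3; ++i )
  {
    if( accumulated == 0.0f )
    {
      blended = value[i];   // the first state is copied, not scaled
    }
    else
    {
      float factor = weight[i] / ( accumulated + weight[i] );
      blended = blended + factor * ( value[i] - blended );   // lerp, like blend(factor, ...)
    }
    accumulated += weight[i];
  }

  // Direct weighted average for comparison; it matches the incremental result (up to rounding).
  float direct = ( 0.9f * 10.0f + 0.8f * 20.0f + 1.0f * 40.0f ) / ( 0.9f + 0.8f + 1.0f );
  std::printf( "incremental = %f, direct = %f\n", blended, direct );
  return 0;
}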