// Drives the hand joint (and its finger joints) from tracked palm data:
// 1. rotates the palm so its normal matches the tracked palm normal,
// 2. refines that rotation with the averaged finger direction,
// 3. points each skeleton finger joint at its tracked fingertip,
// 4. stretches the arm so the hand reaches the tracked palm position.
void SkeletonModel::applyPalmData(int jointIndex, const QVector<int>& fingerJointIndices,
        const QVector<int>& fingertipJointIndices, PalmData& palm) {
    if (jointIndex == -1) {
        return;
    }
    const FBXGeometry& geometry = _geometry->getFBXGeometry();
    // handedness mirror: +1 for the right hand, -1 for the left
    float sign = (jointIndex == geometry.rightHandJointIndex) ? 1.0f : -1.0f;
    glm::quat palmRotation;
    getJointRotation(jointIndex, palmRotation, true);
    // align the model's palm direction with the tracked palm normal
    applyRotationDelta(jointIndex, rotationBetween(palmRotation * geometry.palmDirection, palm.getNormal()), false);
    getJointRotation(jointIndex, palmRotation, true); // re-read: the delta above changed the joint

    // sort the finger indices by raw x, get the average direction
    QVector<IndexValue> fingerIndices;
    glm::vec3 direction;
    for (size_t i = 0; i < palm.getNumFingers(); i++) {
        glm::vec3 fingerVector = palm.getFingers()[i].getTipPosition() - palm.getPosition();
        float length = glm::length(fingerVector);
        if (length > EPSILON) {
            // accumulate normalized tip directions for the average
            direction += fingerVector / length;
        }
        // express the finger in palm-local space (mirrored per hand) for angular sorting
        fingerVector = glm::inverse(palmRotation) * fingerVector * -sign;
        IndexValue indexValue = { i, atan2f(fingerVector.z, fingerVector.x) };
        fingerIndices.append(indexValue);
    }
    qSort(fingerIndices.begin(), fingerIndices.end());

    // rotate palm according to average finger direction
    float directionLength = glm::length(direction);
    const int MIN_ROTATION_FINGERS = 3;
    if (directionLength > EPSILON && palm.getNumFingers() >= MIN_ROTATION_FINGERS) {
        applyRotationDelta(jointIndex, rotationBetween(palmRotation * glm::vec3(-sign, 0.0f, 0.0f), direction), false);
        getJointRotation(jointIndex, palmRotation, true); // re-read after second delta
    }

    // no point in continuing if there are no fingers
    if (palm.getNumFingers() == 0 || fingerJointIndices.isEmpty()) {
        stretchArm(jointIndex, palm.getPosition());
        return;
    }

    // match them up as best we can: map each skeleton finger joint onto the
    // nearest tracked finger by sorted angular order
    float proportion = fingerIndices.size() / (float)fingerJointIndices.size();
    for (int i = 0; i < fingerJointIndices.size(); i++) {
        int fingerIndex = fingerIndices.at(roundf(i * proportion)).index;
        glm::vec3 fingerVector = palm.getFingers()[fingerIndex].getTipPosition() -
            palm.getFingers()[fingerIndex].getRootPosition();
        int fingerJointIndex = fingerJointIndices.at(i);
        int fingertipJointIndex = fingertipJointIndices.at(i);
        // bind-pose bone vector from the finger joint to its fingertip joint
        glm::vec3 jointVector = extractTranslation(geometry.joints.at(fingertipJointIndex).bindTransform) -
            extractTranslation(geometry.joints.at(fingerJointIndex).bindTransform);
        setJointRotation(fingerJointIndex, rotationBetween(palmRotation * jointVector, fingerVector) * palmRotation, true);
    }
    stretchArm(jointIndex, palm.getPosition());
}
// Converts raw Leap Motion joint samples into avatar-frame controller::Pose
// entries (position, rotation, linear and angular velocity) in _poseStateMap.
// Supports two mounting modes: sensor attached to the HMD, or on the desk.
void LeapMotionPlugin::InputDevice::update(float deltaTime, const controller::InputCalibrationData& inputCalibrationData,
        const std::vector<LeapMotionPlugin::LeapMotionJoint>& joints,
        const std::vector<LeapMotionPlugin::LeapMotionJoint>& prevJoints) {
    glm::mat4 controllerToAvatar = glm::inverse(inputCalibrationData.avatarMat) * inputCalibrationData.sensorToWorldMat;
    glm::quat controllerToAvatarRotation = glmExtractRotation(controllerToAvatar);

    glm::vec3 hmdSensorPosition;    // HMD
    glm::quat hmdSensorOrientation; // HMD
    glm::vec3 leapMotionOffset;     // Desktop
    if (_isLeapOnHMD) {
        hmdSensorPosition = extractTranslation(inputCalibrationData.hmdSensorMat);
        hmdSensorOrientation = extractRotation(inputCalibrationData.hmdSensorMat);
    } else {
        // Desktop "zero" position is some distance above the Leap Motion sensor and half the avatar's shoulder-to-hand length
        // in front of avatar.
        float halfShouldToHandLength = fabsf(extractTranslation(inputCalibrationData.defaultLeftHand).x
            - extractTranslation(inputCalibrationData.defaultLeftArm).x) / 2.0f;
        leapMotionOffset = glm::vec3(0.0f, _desktopHeightOffset, halfShouldToHandLength);
    }

    for (size_t i = 0; i < joints.size(); i++) {
        int poseIndex = LeapMotionJointIndexToPoseIndex((LeapMotionJointIndex)i);
        // an all-zero position marks an untracked joint; publish an invalid pose
        if (joints[i].position == Vectors::ZERO) {
            _poseStateMap[poseIndex] = controller::Pose();
            continue;
        }
        glm::vec3 pos;
        glm::quat rot;
        if (_isLeapOnHMD) {
            auto jointPosition = joints[i].position;
            const glm::vec3 HMD_EYE_TO_LEAP_OFFSET = glm::vec3(0.0f, 0.0f, -0.09f); // Eyes to surface of Leap Motion.
            // remap Leap axes into HMD-local axes, then into sensor space
            jointPosition = glm::vec3(-jointPosition.x, -jointPosition.z, -jointPosition.y) + HMD_EYE_TO_LEAP_OFFSET;
            jointPosition = hmdSensorPosition + hmdSensorOrientation * jointPosition;
            pos = transformPoint(controllerToAvatar, jointPosition);
            // same axis remap applied to the orientation's imaginary components
            glm::quat jointOrientation = joints[i].orientation;
            jointOrientation = glm::quat(jointOrientation.w, -jointOrientation.x, -jointOrientation.z, -jointOrientation.y);
            rot = controllerToAvatarRotation * hmdSensorOrientation * jointOrientation;
        } else {
            pos = controllerToAvatarRotation * (joints[i].position - leapMotionOffset);
            const glm::quat ZERO_HAND_ORIENTATION = glm::quat(glm::vec3(PI_OVER_TWO, PI, 0.0f));
            rot = controllerToAvatarRotation * joints[i].orientation * ZERO_HAND_ORIENTATION;
        }
        glm::vec3 linearVelocity, angularVelocity;
        if (i < prevJoints.size()) {
            // NOTE(review): pos is avatar-frame while prevJoints[i].position looks
            // sensor-frame (scaled by METERS_PER_CENTIMETER) — confirm frames match.
            linearVelocity = (pos - (prevJoints[i].position * METERS_PER_CENTIMETER)) / deltaTime; // m/s
            // quat log imaginary part points along the axis of rotation, with length of one half the angle of rotation.
            glm::quat d = glm::log(rot * glm::inverse(prevJoints[i].orientation));
            angularVelocity = glm::vec3(d.x, d.y, d.z) / (0.5f * deltaTime); // radians/s
        }
        _poseStateMap[poseIndex] = controller::Pose(pos, rot, linearVelocity, angularVelocity);
    }
}
// Traces a single ray and returns a heap-allocated spectrum (caller owns it):
// Lambertian shading from the first light on a hit, IllumWhite otherwise.
// FIX: the Intersection and the temporary light spectrum were leaked on every
// call; they are now deleted before returning (matching ArealightTracer::traceRay_r).
CoefficientSpectrum* DiffuseTracer::traceRay( const Ray& ray ){
    Intersection* inter = new Intersection();
    unsigned long materialIndex = closestIntersect( ray, inter );
    if( hitSomething( inter ) ){
        CoefficientSpectrum* lightColor;
        CoefficientSpectrum* materialColor;
        if( useRGB ){
            lightColor = new RGBSpectrum( *scene->lights[0]->mat.GetColorAt(inter));
            materialColor = new RGBSpectrum( *scene->renderObjects[materialIndex]->mat.GetColorAt(inter));
        } else { //use SampledSpectrum
            lightColor = new SampledSpectrum( *scene->lights[0]->mat.GetColorAt(inter));
            materialColor = new SampledSpectrum( *scene->renderObjects[materialIndex]->mat.GetColorAt(inter));
        }
        // direction from the hit point toward the (single) light
        glm::mat4 lightMatrix = scene->lights[0]->prim->transform;
        glm::vec3 lightPos = extractTranslation(lightMatrix);
        glm::vec3 lightVector = calcLightVector(inter->hitPt, lightPos);
        float diffuseTerm = calcDiffuseTerm(lightVector, inter);
        // modulate the material by the light color, then by the diffuse term
        materialColor->Convolve( *lightColor );
        //materialColor->TimesScalar(0.05f * diffuseTerm);
        materialColor->TimesScalar( diffuseTerm );
        delete lightColor; // no longer needed once convolved in
        delete inter;
        return materialColor;
    } else { //return background color
        delete inter;
        if( useRGB ){
            return new RGBSpectrum( *SampledSpectrum::IllumWhite );
        } else {
            return new SampledSpectrum( *SampledSpectrum::IllumWhite );
        }
    }
}
// Decompose a 4x4 transform into this pose's scale, rotation, and translation.
AnimPose::AnimPose(const glm::mat4& mat) {
    scale = extractScale(mat);
    // glm::quat_cast is unreliable on scaled matrices; strip the scale
    // from a working copy before extracting the rotation.
    const glm::mat4 unscaled = glm::scale(mat, 1.0f / scale);
    rot = glm::normalize(glm::quat_cast(unscaled));
    trans = extractTranslation(mat);
}
bool Rig::getJointPosition(int jointIndex, glm::vec3& position) const { if (jointIndex == -1 || jointIndex >= _jointStates.size()) { return false; } // position is in model-frame position = extractTranslation(_jointStates[jointIndex].getTransform()); return true; }
// Debug-renders the per-axis rotation-constraint fans for jointIndex and each
// of its "free" ancestors, using legacy fixed-function OpenGL.
void SkeletonModel::renderJointConstraints(int jointIndex) {
    if (jointIndex == -1) {
        return;
    }
    const FBXGeometry& geometry = _geometry->getFBXGeometry();
    const float BASE_DIRECTION_SIZE = 300.0f;
    float directionSize = BASE_DIRECTION_SIZE * extractUniformScale(_scale);
    glLineWidth(3.0f);
    // walk up the joint chain while the parent joints are marked free
    do {
        const FBXJoint& joint = geometry.joints.at(jointIndex);
        const JointState& jointState = _jointStates.at(jointIndex);
        glm::vec3 position = extractTranslation(jointState.transform) + _translation;
        glPushMatrix();
        glTranslatef(position.x, position.y, position.z);
        // fans are drawn in the parent's frame (constraints limit rotation relative to the parent)
        glm::quat parentRotation = (joint.parentIndex == -1) ? _rotation : _jointStates.at(joint.parentIndex).combinedRotation;
        glm::vec3 rotationAxis = glm::axis(parentRotation);
        glRotatef(glm::degrees(glm::angle(parentRotation)), rotationAxis.x, rotationAxis.y, rotationAxis.z);
        float fanScale = directionSize * 0.75f;
        glScalef(fanScale, fanScale, fanScale);
        const int AXIS_COUNT = 3;
        for (int i = 0; i < AXIS_COUNT; i++) {
            if (joint.rotationMin[i] <= -PI + EPSILON && joint.rotationMax[i] >= PI - EPSILON) {
                continue; // unconstrained
            }
            glm::vec3 axis;
            axis[i] = 1.0f;
            // pick a perpendicular axis to sweep, and color the fan by it
            glm::vec3 otherAxis;
            if (i == 0) {
                otherAxis.y = 1.0f;
            } else {
                otherAxis.x = 1.0f;
            }
            glColor4f(otherAxis.r, otherAxis.g, otherAxis.b, 0.75f);
            // sweep a triangle fan from rotationMin to rotationMax about `axis`
            glBegin(GL_TRIANGLE_FAN);
            glVertex3f(0.0f, 0.0f, 0.0f);
            const int FAN_SEGMENTS = 16;
            for (int j = 0; j < FAN_SEGMENTS; j++) {
                glm::vec3 rotated = glm::angleAxis(glm::mix(joint.rotationMin[i], joint.rotationMax[i],
                    (float)j / (FAN_SEGMENTS - 1)), axis) * otherAxis;
                glVertex3f(rotated.x, rotated.y, rotated.z);
            }
            glEnd();
        }
        glPopMatrix();
        renderOrientationDirections(position, jointState.combinedRotation, directionSize);
        jointIndex = joint.parentIndex;
    } while (jointIndex != -1 && geometry.joints.at(jointIndex).isFree);
    glLineWidth(1.0f);
}
void ViveControllerManager::handlePoseEvent(const mat4& mat, int index) { glm::vec3 position = extractTranslation(mat); glm::quat rotation = glm::quat_cast(mat); // Flip the rotation appropriately for each hand int sign = index == LEFT_HAND ? 1.0f : -1.0f; rotation = rotation * glm::angleAxis(PI, glm::vec3(1.0f, 0.0f, 0.0f)) * glm::angleAxis(sign * PI_OVER_TWO, glm::vec3(0.0f, 0.0f, 1.0f)); position += rotation * glm::vec3(0, 0, -CONTROLLER_LENGTH_OFFSET); _poseStateMap[makeInput(JointChannel(index)).getChannel()] = UserInputMapper::PoseValue(position, rotation); }
// Nudges each joint's *visible* transform so rendered bones stay parallel to
// the ragdoll simulation's bones, without touching the true joint transforms.
void SkeletonModel::updateVisibleJointStates() {
    if (_showTrueJointTransforms || !_ragdoll) {
        // no need to update visible transforms
        return;
    }
    const QVector<VerletPoint>& ragdollPoints = _ragdoll->getPoints();
    QVector<glm::vec3> points;
    points.reserve(_jointStates.size());
    glm::quat invRotation = glm::inverse(_rotation);
    for (int i = 0; i < _jointStates.size(); i++) {
        JointState& state = _jointStates[i];
        points.push_back(ragdollPoints[i]._position);
        // get the parent state (this is the state that we want to rotate)
        int parentIndex = state.getParentIndex();
        if (parentIndex == -1) {
            // root joint: just copy the true transform through
            _jointStates[i].slaveVisibleTransform();
            continue;
        }
        JointState& parentState = _jointStates[parentIndex];
        // check the grand-parent index (for now we don't want to rotate any root states)
        int grandParentIndex = parentState.getParentIndex();
        if (grandParentIndex == -1) {
            continue;
        }
        // make sure state's visibleTransform is up to date
        const glm::mat4& parentTransform = parentState.getVisibleTransform();
        state.computeVisibleTransform(parentTransform);
        // we're looking for the rotation that moves visible bone parallel to ragdoll bone
        // rotationBetween(jointTip - jointPivot, shapeTip - shapePivot)
        // NOTE: points are in simulation-frame so rotate line segment into model-frame
        glm::quat delta = rotationBetween(state.getVisiblePosition() - extractTranslation(parentTransform),
            invRotation * (points[i] - points[parentIndex]));
        // apply (blended in slowly: 1% per call to avoid visible popping)
        parentState.mixVisibleRotationDelta(delta, 0.01f);
        // update transforms
        parentState.computeVisibleTransform(_jointStates[grandParentIndex].getVisibleTransform());
        state.computeVisibleTransform(parentState.getVisibleTransform());
    }
}
void OverlayConductor::setEnabled(bool enabled) { if (enabled == _enabled) { return; } Menu::getInstance()->setIsOptionChecked(MenuOption::Overlays, enabled); _enabled = enabled; // set the new value // if the new state is visible/enabled... if (_enabled) { // alpha fadeIn the overlay mesh. qApp->getApplicationCompositor().fadeIn(); // enable mouse clicks from script qApp->getOverlays().enable(); // enable QML events auto offscreenUi = DependencyManager::get<OffscreenUi>(); offscreenUi->getRootItem()->setEnabled(true); if (_mode == STANDING) { // place the overlay at the current hmd position in world space MyAvatar* myAvatar = DependencyManager::get<AvatarManager>()->getMyAvatar(); auto camMat = cancelOutRollAndPitch(myAvatar->getSensorToWorldMatrix() * qApp->getHMDSensorPose()); Transform t; t.setTranslation(extractTranslation(camMat)); t.setRotation(glm::quat_cast(camMat)); qApp->getApplicationCompositor().setModelTransform(t); } } else { // other wise, if the new state is hidden/not enabled // alpha fadeOut the overlay mesh. qApp->getApplicationCompositor().fadeOut(); // disable mouse clicks from script qApp->getOverlays().disable(); // disable QML events auto offscreenUi = DependencyManager::get<OffscreenUi>(); offscreenUi->getRootItem()->setEnabled(false); } }
// Per-frame overlay maintenance: refreshes the mode, keeps the compositor's
// camera/model transforms consistent with it, and auto-hides the overlay when
// a standing user walks away from it or looks too far off-axis.
void OverlayConductor::update(float dt) {
    updateMode();
    switch (_mode) {
    case SITTING: {
        // when sitting, the overlay is at the origin, facing down the -z axis.
        // the camera is taken directly from the HMD.
        Transform identity;
        qApp->getApplicationCompositor().setModelTransform(identity);
        qApp->getApplicationCompositor().setCameraBaseTransform(identity);
        break;
    }
    case STANDING: {
        // when standing, the overlay is at a reference position, which is set when the overlay is
        // enabled. The camera is taken directly from the HMD, but in world space.
        // So the sensorToWorldMatrix must be applied.
        MyAvatar* myAvatar = DependencyManager::get<AvatarManager>()->getMyAvatar();
        Transform t;
        t.evalFromRawMatrix(myAvatar->getSensorToWorldMatrix());
        qApp->getApplicationCompositor().setCameraBaseTransform(t);
        // detect when head moves out side of sweet spot, or looks away.
        mat4 headMat = myAvatar->getSensorToWorldMatrix() * qApp->getHMDSensorPose();
        vec3 headWorldPos = extractTranslation(headMat);
        vec3 headForward = glm::quat_cast(headMat) * glm::vec3(0.0f, 0.0f, -1.0f);
        Transform modelXform = qApp->getApplicationCompositor().getModelTransform();
        vec3 compositorWorldPos = modelXform.getTranslation();
        vec3 compositorForward = modelXform.getRotation() * glm::vec3(0.0f, 0.0f, -1.0f);
        const float MAX_COMPOSITOR_DISTANCE = 0.6f;  // meters
        const float MAX_COMPOSITOR_ANGLE = 110.0f;   // degrees
        if (_enabled && (glm::distance(headWorldPos, compositorWorldPos) > MAX_COMPOSITOR_DISTANCE ||
            glm::dot(headForward, compositorForward) < cosf(glm::radians(MAX_COMPOSITOR_ANGLE)))) {
            // fade out the overlay
            setEnabled(false);
        }
        break;
    }
    case FLAT:
        // do nothing
        break;
    }
}
// Builds the avatar's bounding capsule from the skeleton's default pose:
// walks the joint hierarchy accumulating default joint transforms, expands an
// AABB by a per-joint sphere, then derives capsule radius/height from it.
void SkeletonModel::computeBoundingShape(const FBXGeometry& geometry) {
    // compute default joint transforms
    int numStates = _rig->getJointStateCount();
    QVector<glm::mat4> transforms;
    transforms.fill(glm::mat4(), numStates);
    // compute bounding box that encloses all shapes
    Extents totalExtents;
    totalExtents.reset();
    totalExtents.addPoint(glm::vec3(0.0f));
    // NOTE: relies on parents appearing before children in joint-state order,
    // so transforms[parentIndex] is ready when a child needs it
    for (int i = 0; i < numStates; i++) {
        // compute the default transform of this joint
        const JointState& state = _rig->getJointState(i);
        int parentIndex = state.getParentIndex();
        if (parentIndex == -1) {
            transforms[i] = _rig->getJointTransform(i);
        } else {
            // standard FBX local transform: pre * rotation * post, parent-relative
            glm::quat modifiedRotation = state.getPreRotation() * state.getDefaultRotation() * state.getPostRotation();
            transforms[i] = transforms[parentIndex] * glm::translate(state.getTranslation())
                * state.getPreTransform() * glm::mat4_cast(modifiedRotation) * state.getPostTransform();
        }
        // Each joint contributes a sphere at its position
        glm::vec3 axis(state.getBoneRadius());
        glm::vec3 jointPosition = extractTranslation(transforms[i]);
        totalExtents.addPoint(jointPosition + axis);
        totalExtents.addPoint(jointPosition - axis);
    }
    // compute bounding shape parameters
    // NOTE: we assume that the longest side of totalExtents is the yAxis...
    glm::vec3 diagonal = totalExtents.maximum - totalExtents.minimum;
    // ... and assume the radius is half the RMS of the X and Z sides:
    _boundingCapsuleRadius = 0.5f * sqrtf(0.5f * (diagonal.x * diagonal.x + diagonal.z * diagonal.z));
    _boundingCapsuleHeight = diagonal.y - 2.0f * _boundingCapsuleRadius;
    // offset is expressed relative to the root joint
    glm::vec3 rootPosition = _rig->getJointState(geometry.rootJointIndex).getPosition();
    _boundingCapsuleLocalOffset = 0.5f * (totalExtents.maximum + totalExtents.minimum) - rootPosition;
    _boundingRadius = 0.5f * glm::length(diagonal);
}
// Simple arm IK: given a target hand position, rotates the elbow and shoulder
// so the arm chain reaches toward it, then places the elbow explicitly.
void SkeletonModel::stretchArm(int jointIndex, const glm::vec3& position) {
    // find out where the hand is pointing
    glm::quat handRotation;
    getJointRotation(jointIndex, handRotation, true);
    const FBXGeometry& geometry = _geometry->getFBXGeometry();
    // the bone's forward axis is mirrored between the two hands
    glm::vec3 forwardVector(jointIndex == geometry.rightHandJointIndex ? -1.0f : 1.0f, 0.0f, 0.0f);
    glm::vec3 handVector = handRotation * forwardVector;

    // align elbow with hand
    const FBXJoint& joint = geometry.joints.at(jointIndex);
    if (joint.parentIndex == -1) {
        return;
    }
    glm::quat elbowRotation;
    getJointRotation(joint.parentIndex, elbowRotation, true);
    applyRotationDelta(joint.parentIndex, rotationBetween(elbowRotation * forwardVector, handVector), false);

    // set position according to normal length
    float scale = extractUniformScale(_scale);
    glm::vec3 handPosition = position - _translation;
    // back the elbow off from the hand along the forearm direction
    glm::vec3 elbowPosition = handPosition - handVector * joint.distanceToParent * scale;

    // set shoulder orientation to point to elbow
    const FBXJoint& parentJoint = geometry.joints.at(joint.parentIndex);
    if (parentJoint.parentIndex == -1) {
        return;
    }
    glm::quat shoulderRotation;
    getJointRotation(parentJoint.parentIndex, shoulderRotation, true);
    applyRotationDelta(parentJoint.parentIndex, rotationBetween(shoulderRotation * forwardVector,
        elbowPosition - extractTranslation(_jointStates.at(parentJoint.parentIndex).transform)), false);

    // update the shoulder state so the elbow translation below lands in the new frame
    updateJointState(parentJoint.parentIndex);

    // adjust the elbow's local translation
    setJointTranslation(joint.parentIndex, elbowPosition);
}
void OverlayConductor::updateMode() { Mode newMode; if (qApp->isHMDMode()) { newMode = SITTING; } else { newMode = FLAT; } if (newMode != _mode) { switch (newMode) { case SITTING: { // enter the SITTING state // place the overlay at origin Transform identity; qApp->getApplicationCompositor().setModelTransform(identity); break; } case STANDING: { // enter the STANDING state // place the overlay at the current hmd position in world space MyAvatar* myAvatar = DependencyManager::get<AvatarManager>()->getMyAvatar(); auto camMat = cancelOutRollAndPitch(myAvatar->getSensorToWorldMatrix() * qApp->getHMDSensorPose()); Transform t; t.setTranslation(extractTranslation(camMat)); t.setRotation(glm::quat_cast(camMat)); qApp->getApplicationCompositor().setModelTransform(t); break; } case FLAT: // do nothing break; } } _mode = newMode; }
// World-space HMD position; the origin when no HMD is driving the display.
glm::vec3 HMDScriptingInterface::getPosition() const {
    if (!qApp->getActiveDisplayPlugin()->isHmd()) {
        return glm::vec3();
    }
    return extractTranslation(getWorldHMDMatrix());
}
// Called within Model::simulate call, below.
// Feeds the Rig with per-frame head, hand, locomotion, and eye parameters.
// Own avatar: parameters come from the HMD/controllers; other avatars: joints
// arrive over the wire and only the head/eye corrections are applied here.
void SkeletonModel::updateRig(float deltaTime, glm::mat4 parentTransform) {
    const FBXGeometry& geometry = getFBXGeometry();
    Head* head = _owningAvatar->getHead();
    if (_owningAvatar->isMyAvatar()) {
        MyAvatar* myAvatar = static_cast<MyAvatar*>(_owningAvatar);
        Rig::HeadParameters headParams;
        headParams.enableLean = qApp->isHMDMode();
        headParams.leanSideways = head->getFinalLeanSideways();
        headParams.leanForward = head->getFinalLeanForward();
        headParams.torsoTwist = head->getTorsoTwist();
        if (qApp->isHMDMode()) {
            headParams.isInHMD = true;
            // get HMD position from sensor space into world space, and back into rig space
            glm::mat4 worldHMDMat = myAvatar->getSensorToWorldMatrix() * myAvatar->getHMDSensorMatrix();
            glm::mat4 rigToWorld = createMatFromQuatAndPos(getRotation(), getTranslation());
            glm::mat4 worldToRig = glm::inverse(rigToWorld);
            glm::mat4 rigHMDMat = worldToRig * worldHMDMat;
            headParams.rigHeadPosition = extractTranslation(rigHMDMat);
            headParams.rigHeadOrientation = extractRotation(rigHMDMat);
            headParams.worldHeadOrientation = extractRotation(worldHMDMat);
        } else {
            headParams.isInHMD = false;
            // We don't have a valid localHeadPosition.
            // Y_180 converts between avatar-frame and rig-frame conventions
            headParams.rigHeadOrientation = Quaternions::Y_180 * head->getFinalOrientationInLocalFrame();
            headParams.worldHeadOrientation = head->getFinalOrientationInWorldFrame();
        }
        headParams.leanJointIndex = geometry.leanJointIndex;
        headParams.neckJointIndex = geometry.neckJointIndex;
        headParams.isTalking = head->getTimeWithoutTalking() <= 1.5f;
        _rig->updateFromHeadParameters(headParams, deltaTime);

        // hand controller poses, converted into rig frame when tracked
        Rig::HandParameters handParams;
        auto leftPose = myAvatar->getLeftHandControllerPoseInAvatarFrame();
        if (leftPose.isValid()) {
            handParams.isLeftEnabled = true;
            handParams.leftPosition = Quaternions::Y_180 * leftPose.getTranslation();
            handParams.leftOrientation = Quaternions::Y_180 * leftPose.getRotation();
        } else {
            handParams.isLeftEnabled = false;
        }
        auto rightPose = myAvatar->getRightHandControllerPoseInAvatarFrame();
        if (rightPose.isValid()) {
            handParams.isRightEnabled = true;
            handParams.rightPosition = Quaternions::Y_180 * rightPose.getTranslation();
            handParams.rightOrientation = Quaternions::Y_180 * rightPose.getRotation();
        } else {
            handParams.isRightEnabled = false;
        }
        // body capsule so the rig can keep hands from penetrating the torso
        handParams.bodyCapsuleRadius = myAvatar->getCharacterController()->getCapsuleRadius();
        handParams.bodyCapsuleHalfHeight = myAvatar->getCharacterController()->getCapsuleHalfHeight();
        handParams.bodyCapsuleLocalOffset = myAvatar->getCharacterController()->getCapsuleLocalOffset();
        _rig->updateFromHandParameters(handParams, deltaTime);

        // locomotion state drives the animation state machine
        Rig::CharacterControllerState ccState = convertCharacterControllerState(myAvatar->getCharacterController()->getState());
        auto velocity = myAvatar->getLocalVelocity();
        auto position = myAvatar->getLocalPosition();
        auto orientation = myAvatar->getLocalOrientation();
        _rig->computeMotionAnimationState(deltaTime, position, velocity, orientation, ccState);

        // evaluate AnimGraph animation and update jointStates.
        Model::updateRig(deltaTime, parentTransform);

        Rig::EyeParameters eyeParams;
        eyeParams.worldHeadOrientation = headParams.worldHeadOrientation;
        eyeParams.eyeLookAt = head->getLookAtPosition();
        eyeParams.eyeSaccade = head->getSaccade();
        eyeParams.modelRotation = getRotation();
        eyeParams.modelTranslation = getTranslation();
        eyeParams.leftEyeJointIndex = geometry.leftEyeJointIndex;
        eyeParams.rightEyeJointIndex = geometry.rightEyeJointIndex;
        _rig->updateFromEyeParameters(eyeParams);
    } else {
        Model::updateRig(deltaTime, parentTransform);
        // This is a little more work than we really want.
        //
        // Other avatars joint, including their eyes, should already be set just like any other joints
        // from the wire data. But when looking at me, we want the eyes to use the corrected lookAt.
        //
        // Thus this should really only be ... else if (_owningAvatar->getHead()->isLookingAtMe()) {...
        // However, in the !isLookingAtMe case, the eyes aren't rotating the way they should right now.
        // We will revisit that as priorities allow, and particularly after the new rig/animation/joints.
        // If the head is not positioned, updateEyeJoints won't get the math right
        glm::quat headOrientation;
        _rig->getJointRotation(geometry.headJointIndex, headOrientation);
        glm::vec3 eulers = safeEulerAngles(headOrientation);
        head->setBasePitch(glm::degrees(-eulers.x));
        head->setBaseYaw(glm::degrees(eulers.y));
        head->setBaseRoll(glm::degrees(-eulers.z));
        Rig::EyeParameters eyeParams;
        eyeParams.worldHeadOrientation = head->getFinalOrientationInWorldFrame();
        eyeParams.eyeLookAt = head->getCorrectedLookAtPosition();
        eyeParams.eyeSaccade = glm::vec3();
        eyeParams.modelRotation = getRotation();
        eyeParams.modelTranslation = getTranslation();
        eyeParams.leftEyeJointIndex = geometry.leftEyeJointIndex;
        eyeParams.rightEyeJointIndex = geometry.rightEyeJointIndex;
        _rig->updateFromEyeParameters(eyeParams);
    }
}
// Recursive ray trace with an area light: diffuse + specular shading scaled
// by a sampled area-light contribution, plus refraction and reflection up to
// BOUNCE_LIMIT. Returns a heap-allocated spectrum the caller owns.
// NOTE(review): ownership here is fragile — finalColor is a shallow alias of
// specularColor, and AddHeapCS appears to consume its heap argument; confirm
// before restructuring the deletes.
CoefficientSpectrum* ArealightTracer::traceRay_r( const Ray& ray, int bounceNum) {
    // debug escape: rays that wander far out on +x return a sentinel color
    if( ray.GetStart().x > 9000 ) {
        //std::cout << "Using yellow" << std::endl;
        return new RGBSpectrum( *SampledSpectrum::IllumYellow );
    }
    Intersection* inter = new Intersection();
    unsigned long materialIndex = closestIntersect( ray, inter );
    if( hitSomething( inter ) ) {
        CoefficientSpectrum* lightColor;
        CoefficientSpectrum* materialColor;
        CoefficientSpectrum* diffuseColor;
        CoefficientSpectrum* specularColor;
        Material mat = scene->renderObjects[materialIndex]->mat;
        // specularColor and diffuseColor both start as copies of the light color
        if( useRGB ) {
            lightColor = new RGBSpectrum( *scene->lights[0]->mat.GetColorAt(inter));
            materialColor = new RGBSpectrum(*mat.GetColorAt(inter));
            specularColor = new RGBSpectrum( *scene->lights[0]->mat.GetColorAt(inter) );
            diffuseColor = new RGBSpectrum( *scene->lights[0]->mat.GetColorAt(inter) );
        } else { //use SampledSpectrum
            lightColor = new SampledSpectrum( *scene->lights[0]->mat.GetColorAt(inter));
            materialColor = new SampledSpectrum(*mat.GetColorAt(inter));
            specularColor = new SampledSpectrum( *scene->lights[0]->mat.GetColorAt(inter) );
            diffuseColor = new SampledSpectrum( *scene->lights[0]->mat.GetColorAt(inter) );
        }
        glm::mat4 lightMatrix = scene->lights[0]->prim->transform;
        glm::vec3 lightPos = extractTranslation(lightMatrix);
        glm::vec3 lightVector = calcLightVector(inter->hitPt, lightPos);
        //sfDir is the direction of the shadow feeler.
        //glm::vec3 sfDir = glm::normalize(lightPos - inter->hitPt);
        //Ray sfRay(inter->hitPt + 0.01f*sfDir, sfDir);
        //float lightDistance = sfRay.ParamAtPt( lightPos );
        //Intersection* sfInter = new Intersection();
        //unsigned long obstructIndex = closestIntersect( sfRay, sfInter );
        float diffuseTerm = 0.0f;
        float specularTerm = 0.0f;
        // old point-light shadow-feeler path, superseded by area-light sampling:
        //Material obstructMat = scene->renderObjects[obstructIndex]->mat;
        //if( hitSomething( sfInter ) && obstructMat.transparency < EPSILON
        //&& ( sfInter->tParam < lightDistance ) ) { //light obstructed
        //diffuseTerm = 0.1f;
        //specularTerm = 0.0f;
        //} else {
        //diffuseTerm = calcDiffuseTerm(lightVector, inter);
        //specularTerm = calcSpecularTerm(inter->hitPt,
        //scene->cam.m_eye, mat.specPow, inter);
        //}
        // soft-shadow factor from sampling the area light
        float lightContrib = sampleAreaLight( inter );
        //float lightContrib = 1;
        diffuseTerm = calcDiffuseTerm(lightVector, inter) * lightContrib;
        specularTerm = calcSpecularTerm(inter->hitPt, scene->cam.m_eye, mat.specPow, inter) * lightContrib;
        //HACK: from before, specularColor and diffuseColor all start out
        //as lightColor
        diffuseColor->TimesScalar( DIFFUSE_CONST * diffuseTerm );
        diffuseColor->Convolve( *materialColor );
        specularColor->TimesScalar( SPECULAR_CONST * specularTerm );
        //HACK BELOW: MODIFY specular color, then SHALLOW COPY to final color!
        specularColor->AddHeapCS(diffuseColor);
        CoefficientSpectrum* finalColor = specularColor;
        //materialColor->TimesScalar( diffuseTerm );
        // recursive refraction, blended by the material's transparency
        if ( mat.transparency > 0.0f && bounceNum < BOUNCE_LIMIT ) {
            Ray refrRay = ray.GetRefracted( inter->normal, inter->hitPt, mat.refractiveIndex );
            //refrRay.isRefracted = true;
            CoefficientSpectrum* refrColor = traceRay_r( refrRay, bounceNum + 1);
            finalColor->TimesScalar(1 - mat.transparency);
            refrColor->TimesScalar(mat.transparency);
            finalColor->AddHeapCS(refrColor);
        }
        // recursive reflection, blended by the material's reflectivity
        if( mat.reflectivity > 0.0f && bounceNum < BOUNCE_LIMIT) {
            Ray reflRay = ray.GetReflected( inter->normal, inter->hitPt);
            CoefficientSpectrum* reflColor = traceRay_r( reflRay, bounceNum + 1);
            finalColor->TimesScalar(1 - mat.reflectivity);
            reflColor->TimesScalar(mat.reflectivity);
            finalColor->AddHeapCS(reflColor);
        }
        delete lightColor;
        delete materialColor;
        delete diffuseColor;
        delete inter;
        return finalColor;
    } else { //return background color
        if( useRGB ) {
            delete inter;
            return new RGBSpectrum( *SampledSpectrum::IllumWhite );
        } else {
            delete inter;
            return new SampledSpectrum( *SampledSpectrum::IllumWhite );
        }
    }
}
// Builds the avatar's bounding capsule from the skeleton's default pose and
// per-joint collision shapes: accumulates default joint transforms, expands
// an AABB by each joint's shape, then fits a capsule to the box.
void SkeletonModel::computeBoundingShape(const FBXGeometry& geometry) {
    // compute default joint transforms
    int numStates = _jointStates.size();
    assert(numStates == _shapes.size());
    QVector<glm::mat4> transforms;
    transforms.fill(glm::mat4(), numStates);

    // compute bounding box that encloses all shapes
    Extents totalExtents;
    totalExtents.reset();
    totalExtents.addPoint(glm::vec3(0.0f));
    // NOTE: relies on parents appearing before children in joint order,
    // so transforms[parentIndex] is ready when a child needs it
    for (int i = 0; i < numStates; i++) {
        // compute the default transform of this joint
        JointState& state = _jointStates[i];
        const FBXJoint& joint = state.getFBXJoint();
        int parentIndex = joint.parentIndex;
        if (parentIndex == -1) {
            transforms[i] = _jointStates[i].getTransform();
        } else {
            // standard FBX local transform: pre * rotation * post, parent-relative
            glm::quat modifiedRotation = joint.preRotation * joint.rotation * joint.postRotation;
            transforms[i] = transforms[parentIndex] * glm::translate(joint.translation)
                * joint.preTransform * glm::mat4_cast(modifiedRotation) * joint.postTransform;
        }

        // Each joint contributes its point to the bounding box
        glm::vec3 jointPosition = extractTranslation(transforms[i]);
        totalExtents.addPoint(jointPosition);

        Shape* shape = _shapes[i];
        if (!shape) {
            continue;
        }

        // Each joint with a shape contributes to the totalExtents: a box
        // that contains the sphere centered at the end of the joint with radius of the bone.
        // TODO: skip hand and arm shapes for bounding box calculation
        int type = shape->getType();
        if (type == CAPSULE_SHAPE) {
            // add the two furthest surface points of the capsule
            CapsuleShape* capsule = static_cast<CapsuleShape*>(shape);
            float radius = capsule->getRadius();
            glm::vec3 axis(radius);
            Extents shapeExtents;
            shapeExtents.reset();
            shapeExtents.addPoint(jointPosition + axis);
            shapeExtents.addPoint(jointPosition - axis);
            totalExtents.addExtents(shapeExtents);
        } else if (type == SPHERE_SHAPE) {
            float radius = shape->getBoundingRadius();
            glm::vec3 axis(radius);
            Extents shapeExtents;
            shapeExtents.reset();
            shapeExtents.addPoint(jointPosition + axis);
            shapeExtents.addPoint(jointPosition - axis);
            totalExtents.addExtents(shapeExtents);
        }
    }

    // compute bounding shape parameters
    // NOTE: we assume that the longest side of totalExtents is the yAxis...
    glm::vec3 diagonal = totalExtents.maximum - totalExtents.minimum;
    // ... and assume the radius is half the RMS of the X and Z sides:
    float capsuleRadius = 0.5f * sqrtf(0.5f * (diagonal.x * diagonal.x + diagonal.z * diagonal.z));
    _boundingShape.setRadius(capsuleRadius);
    _boundingShape.setHalfHeight(0.5f * diagonal.y - capsuleRadius);
    // offset is expressed relative to the root joint
    glm::vec3 rootPosition = _jointStates[geometry.rootJointIndex].getPosition();
    _boundingShapeLocalOffset = 0.5f * (totalExtents.maximum + totalExtents.minimum) - rootPosition;
    _boundingRadius = 0.5f * glm::length(diagonal);
}
// Decompose a 4x4 transform into this pose's scale, rotation, and translation.
// FIX: glm::quat_cast does not behave well on scaled matrices, so the scale
// must be cancelled out before extracting the rotation; otherwise any
// non-identity scale corrupts the resulting quaternion. This matches the
// other AnimPose matrix constructor in this codebase.
AnimPose::AnimPose(const glm::mat4& mat) {
    scale = extractScale(mat);
    // quat_cast doesn't work so well with scaled matrices, so cancel it out.
    glm::mat4 tmp = glm::scale(mat, 1.0f / scale);
    rot = glm::normalize(glm::quat_cast(tmp));
    trans = extractTranslation(mat);
}
// Convert an OpenVR controller pose (as a sensor-frame matrix plus tracked
// velocities) into a controller::Pose aligned with the avatar's hand axes.
// isLeftHand selects the per-hand rotation/translation offsets; the returned
// pose's velocity accounts for the lever arm introduced by the translation
// offset.
controller::Pose openVrControllerPoseToHandPose(bool isLeftHand, const mat4& mat, const vec3& linearVelocity, const vec3& angularVelocity) {
    // When the sensor-to-world rotation is identity the coordinate axes look like this:
    //
    //                       user
    //                      forward
    //                        -z
    //                         |
    //                        y|      user
    //  y                      o----x right
    //   o-----x               user
    //   |                     up
    //   |
    //   z
    //
    //  Rift
    // From ABOVE the hand canonical axes look like this:
    //
    //      | | | |          y        | | | |
    //      | | | |          |        | | | |
    //      |     |          |        |     |
    //      |left | /  x---- +  \     |right|
    //      |     _/          z   \_  |     |
    //       |   |                 |   |
    //       |   |                 |   |
    //
    // So when the user is in Rift space facing the -zAxis with hands outstretched and palms down
    // the rotation to align the Touch axes with those of the hands is:
    //
    //    touchToHand = halfTurnAboutY * quarterTurnAboutX
    //
    // Due to how the Touch controllers fit into the palm there is an offset that is different for each hand.
    // You can think of this offset as the inverse of the measured rotation when the hands are posed, such that
    // the combination (measurement * offset) is identity at this orientation.
    //
    //    Qoffset = glm::inverse(deltaRotation when hand is posed fingers forward, palm down)
    //
    // An approximate offset for the Touch can be obtained by inspection:
    //
    //    Qoffset = glm::inverse(glm::angleAxis(sign * PI/2.0f, zAxis) * glm::angleAxis(PI/4.0f, xAxis))
    //
    // So the full equation is:
    //
    //    Q = combinedMeasurement * touchToHand
    //
    //    Q = (deltaQ * QOffset) * (yFlip * quarterTurnAboutX)
    //
    //    Q = (deltaQ * inverse(deltaQForAlignedHand)) * (yFlip * quarterTurnAboutX)
    static const glm::quat yFlip = glm::angleAxis(PI, Vectors::UNIT_Y);
    static const glm::quat quarterX = glm::angleAxis(PI_OVER_TWO, Vectors::UNIT_X);
    static const glm::quat touchToHand = yFlip * quarterX;

    // Per-hand contribution of QOffset: a quarter turn about Z (mirrored
    // between hands) composed with an eighth turn about X.
    static const glm::quat leftQuarterZ = glm::angleAxis(-PI_OVER_TWO, Vectors::UNIT_Z);
    static const glm::quat rightQuarterZ = glm::angleAxis(PI_OVER_TWO, Vectors::UNIT_Z);
    static const glm::quat eighthX = glm::angleAxis(PI / 4.0f, Vectors::UNIT_X);

    static const glm::quat leftRotationOffset = glm::inverse(leftQuarterZ * eighthX) * touchToHand;
    static const glm::quat rightRotationOffset = glm::inverse(rightQuarterZ * eighthX) * touchToHand;

    // Offsets are in meters; these need to match the leftBasePosition in
    // tutorial/viveControllerConfiguration.js:21.
    static const float CONTROLLER_LATERAL_OFFSET = 0.0381f;
    static const float CONTROLLER_VERTICAL_OFFSET = 0.0495f;
    static const float CONTROLLER_FORWARD_OFFSET = 0.1371f;
    static const glm::vec3 CONTROLLER_OFFSET(CONTROLLER_LATERAL_OFFSET, CONTROLLER_VERTICAL_OFFSET, CONTROLLER_FORWARD_OFFSET);
    // The left hand mirrors the lateral (x) component of the offset.
    static const glm::vec3 leftTranslationOffset = glm::vec3(-1.0f, 1.0f, 1.0f) * CONTROLLER_OFFSET;
    static const glm::vec3 rightTranslationOffset = CONTROLLER_OFFSET;

    auto translationOffset = (isLeftHand ? leftTranslationOffset : rightTranslationOffset);
    auto rotationOffset = (isLeftHand ? leftRotationOffset : rightRotationOffset);

    glm::vec3 position = extractTranslation(mat);
    glm::quat rotation = glm::normalize(glm::quat_cast(mat));

    // Apply the hand-specific offsets: translate in the controller's rotated
    // frame, then compose the rotation correction.
    position += rotation * translationOffset;
    rotation = rotation * rotationOffset;

    auto result = controller::Pose(position, rotation);

    // The offset point moves with the controller's angular velocity:
    // v_point = v_origin + omega x r, where r = rotation * translationOffset.
    result.velocity = linearVelocity + glm::cross(angularVelocity, position - extractTranslation(mat));
    result.angularVelocity = angularVelocity;
    return result;
}
void SkeletonModel::computeBoundingShape(const FBXGeometry& geometry) { // compute default joint transforms int numStates = _jointStates.size(); QVector<glm::mat4> transforms; transforms.fill(glm::mat4(), numStates); QVector<VerletPoint>& ragdollPoints = _ragdoll->getPoints(); // compute the default transforms and slam the ragdoll positions accordingly // (which puts the shapes where we want them) for (int i = 0; i < numStates; i++) { JointState& state = _jointStates[i]; const FBXJoint& joint = state.getFBXJoint(); int parentIndex = joint.parentIndex; if (parentIndex == -1) { transforms[i] = _jointStates[i].getTransform(); ragdollPoints[i].initPosition(extractTranslation(transforms[i])); continue; } glm::quat modifiedRotation = joint.preRotation * joint.rotation * joint.postRotation; transforms[i] = transforms[parentIndex] * glm::translate(joint.translation) * joint.preTransform * glm::mat4_cast(modifiedRotation) * joint.postTransform; // setting the ragdollPoints here slams the VerletShapes into their default positions ragdollPoints[i].initPosition(extractTranslation(transforms[i])); } // compute bounding box that encloses all shapes Extents totalExtents; totalExtents.reset(); totalExtents.addPoint(glm::vec3(0.0f)); for (int i = 0; i < _shapes.size(); i++) { Shape* shape = _shapes[i]; if (!shape) { continue; } // TODO: skip hand and arm shapes for bounding box calculation Extents shapeExtents; shapeExtents.reset(); glm::vec3 localPosition = shape->getTranslation(); int type = shape->getType(); if (type == CAPSULE_SHAPE) { // add the two furthest surface points of the capsule CapsuleShape* capsule = static_cast<CapsuleShape*>(shape); glm::vec3 axis; capsule->computeNormalizedAxis(axis); float radius = capsule->getRadius(); float halfHeight = capsule->getHalfHeight(); axis = halfHeight * axis + glm::vec3(radius); shapeExtents.addPoint(localPosition + axis); shapeExtents.addPoint(localPosition - axis); totalExtents.addExtents(shapeExtents); } else if (type == 
SPHERE_SHAPE) { float radius = shape->getBoundingRadius(); glm::vec3 axis = glm::vec3(radius); shapeExtents.addPoint(localPosition + axis); shapeExtents.addPoint(localPosition - axis); totalExtents.addExtents(shapeExtents); } } // compute bounding shape parameters // NOTE: we assume that the longest side of totalExtents is the yAxis... glm::vec3 diagonal = totalExtents.maximum - totalExtents.minimum; // ... and assume the radius is half the RMS of the X and Z sides: float capsuleRadius = 0.5f * sqrtf(0.5f * (diagonal.x * diagonal.x + diagonal.z * diagonal.z)); _boundingShape.setRadius(capsuleRadius); _boundingShape.setHalfHeight(0.5f * diagonal.y - capsuleRadius); glm::vec3 rootPosition = _jointStates[geometry.rootJointIndex].getPosition(); _boundingShapeLocalOffset = 0.5f * (totalExtents.maximum + totalExtents.minimum) - rootPosition; _boundingRadius = 0.5f * glm::length(diagonal); }
bool RenderableModelEntityItem::getAnimationFrame() { bool newFrame = false; if (!_model || !_model->isActive() || !_model->isLoaded() || _needsInitialSimulation) { return false; } if (!hasAnimation() || !_jointMappingCompleted) { return false; } AnimationPointer myAnimation = getAnimation(_animationProperties.getURL()); // FIXME: this could be optimized if (myAnimation && myAnimation->isLoaded()) { const QVector<FBXAnimationFrame>& frames = myAnimation->getFramesReference(); // NOTE: getFrames() is too heavy auto& fbxJoints = myAnimation->getGeometry().joints; int frameCount = frames.size(); if (frameCount > 0) { int animationCurrentFrame = (int)(glm::floor(getAnimationCurrentFrame())) % frameCount; if (animationCurrentFrame < 0 || animationCurrentFrame > frameCount) { animationCurrentFrame = 0; } if (animationCurrentFrame != _lastKnownCurrentFrame) { _lastKnownCurrentFrame = animationCurrentFrame; newFrame = true; resizeJointArrays(); if (_jointMapping.size() != _model->getJointStateCount()) { qDebug() << "RenderableModelEntityItem::getAnimationFrame -- joint count mismatch" << _jointMapping.size() << _model->getJointStateCount(); assert(false); return false; } const QVector<glm::quat>& rotations = frames[animationCurrentFrame].rotations; const QVector<glm::vec3>& translations = frames[animationCurrentFrame].translations; for (int j = 0; j < _jointMapping.size(); j++) { int index = _jointMapping[j]; if (index >= 0) { glm::mat4 translationMat; if (index < translations.size()) { translationMat = glm::translate(translations[index]); } glm::mat4 rotationMat(glm::mat4::_null); if (index < rotations.size()) { rotationMat = glm::mat4_cast(fbxJoints[index].preRotation * rotations[index] * fbxJoints[index].postRotation); } else { rotationMat = glm::mat4_cast(fbxJoints[index].preRotation * fbxJoints[index].postRotation); } glm::mat4 finalMat = (translationMat * fbxJoints[index].preTransform * rotationMat * fbxJoints[index].postTransform); 
_absoluteJointTranslationsInObjectFrame[j] = extractTranslation(finalMat); _absoluteJointTranslationsInObjectFrameSet[j] = true; _absoluteJointTranslationsInObjectFrameDirty[j] = true; _absoluteJointRotationsInObjectFrame[j] = glmExtractRotation(finalMat); _absoluteJointRotationsInObjectFrameSet[j] = true; _absoluteJointRotationsInObjectFrameDirty[j] = true; } } } } } return newFrame; }