//---------------------------------------------------------------------------------
bool GlesFullScreenQuad::initialize()
{
    destroy();

    // Each vertex packs position.xy and texcoord.uv into one Vector4d.
    static Vector4d vertices[] = {
        Vector4d(-1.0f, -1.0f, 0.0f, 0.0f),
        Vector4d(-1.0f,  1.0f, 0.0f, 1.0f),
        Vector4d( 1.0f, -1.0f, 1.0f, 0.0f),
        Vector4d( 1.0f,  1.0f, 1.0f, 1.0f)
    };
    const uint32_t vertexBufferSize = sizeof(vertices);

    glGenBuffers(1, &vertexBuffer_);
    glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer_);
    CHECK_GLES_ERROR("GlesFullScreenQuad::initialize: glBindBuffer");

    glBufferData(GL_ARRAY_BUFFER, vertexBufferSize, vertices, GL_STATIC_DRAW);
    CHECK_GLES_ERROR("GlesFullScreenQuad::initialize: glBufferData");

    glBindBuffer(GL_ARRAY_BUFFER, 0);
    return true;
}
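// Hedged sketch (not part of the class above): one way the interleaved x,y,u,v
// quad buffer created in GlesFullScreenQuad::initialize could be drawn. This
// assumes each Vector4d above packs four GLfloat values (the f-suffixed
// literals suggest single precision); the attribute locations, function name,
// and a valid GL context with a bound program are assumptions as well.
#include <GLES2/gl2.h>

void drawFullScreenQuad(GLuint vertexBuffer, GLint posLoc, GLint uvLoc)
{
    glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer);

    // Each vertex is 4 floats: position.xy followed by texcoord.uv.
    const GLsizei stride = 4 * sizeof(GLfloat);
    glEnableVertexAttribArray(posLoc);
    glVertexAttribPointer(posLoc, 2, GL_FLOAT, GL_FALSE, stride,
                          reinterpret_cast<const void*>(0));
    glEnableVertexAttribArray(uvLoc);
    glVertexAttribPointer(uvLoc, 2, GL_FLOAT, GL_FALSE, stride,
                          reinterpret_cast<const void*>(2 * sizeof(GLfloat)));

    // The four vertices are ordered so they can be drawn as a triangle strip.
    glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);

    glBindBuffer(GL_ARRAY_BUFFER, 0);
}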
void test_mapstride()
{
  for(int i = 0; i < g_repeat; i++) {
    EIGEN_UNUSED int maxn = 30;
    CALL_SUBTEST_1( map_class_vector<Aligned>(Matrix<float, 1, 1>()) );
    CALL_SUBTEST_1( map_class_vector<Unaligned>(Matrix<float, 1, 1>()) );
    CALL_SUBTEST_2( map_class_vector<Aligned>(Vector4d()) );
    CALL_SUBTEST_2( map_class_vector<Unaligned>(Vector4d()) );
    CALL_SUBTEST_3( map_class_vector<Aligned>(RowVector4f()) );
    CALL_SUBTEST_3( map_class_vector<Unaligned>(RowVector4f()) );
    CALL_SUBTEST_4( map_class_vector<Aligned>(VectorXcf(internal::random<int>(1,maxn))) );
    CALL_SUBTEST_4( map_class_vector<Unaligned>(VectorXcf(internal::random<int>(1,maxn))) );
    CALL_SUBTEST_5( map_class_vector<Aligned>(VectorXi(internal::random<int>(1,maxn))) );
    CALL_SUBTEST_5( map_class_vector<Unaligned>(VectorXi(internal::random<int>(1,maxn))) );

    CALL_SUBTEST_1( map_class_matrix<Aligned>(Matrix<float, 1, 1>()) );
    CALL_SUBTEST_1( map_class_matrix<Unaligned>(Matrix<float, 1, 1>()) );
    CALL_SUBTEST_2( map_class_matrix<Aligned>(Matrix4d()) );
    CALL_SUBTEST_2( map_class_matrix<Unaligned>(Matrix4d()) );
    CALL_SUBTEST_3( map_class_matrix<Aligned>(Matrix<float,3,5>()) );
    CALL_SUBTEST_3( map_class_matrix<Unaligned>(Matrix<float,3,5>()) );
    CALL_SUBTEST_3( map_class_matrix<Aligned>(Matrix<float,4,8>()) );
    CALL_SUBTEST_3( map_class_matrix<Unaligned>(Matrix<float,4,8>()) );
    CALL_SUBTEST_4( map_class_matrix<Aligned>(MatrixXcf(internal::random<int>(1,maxn),internal::random<int>(1,maxn))) );
    CALL_SUBTEST_4( map_class_matrix<Unaligned>(MatrixXcf(internal::random<int>(1,maxn),internal::random<int>(1,maxn))) );
    CALL_SUBTEST_5( map_class_matrix<Aligned>(MatrixXi(internal::random<int>(1,maxn),internal::random<int>(1,maxn))) );
    CALL_SUBTEST_5( map_class_matrix<Unaligned>(MatrixXi(internal::random<int>(1,maxn),internal::random<int>(1,maxn))) );
    CALL_SUBTEST_6( map_class_matrix<Aligned>(MatrixXcd(internal::random<int>(1,maxn),internal::random<int>(1,maxn))) );
    CALL_SUBTEST_6( map_class_matrix<Unaligned>(MatrixXcd(internal::random<int>(1,maxn),internal::random<int>(1,maxn))) );
  }
}
ColorBank::Colorf ColorBank::colorf(DotPath const &path) const
{
    if (path.isEmpty()) return Colorf();

    Vector4d clamped = data(path).as<Impl::ColorData>().color;
    clamped = clamped.max(Vector4d(0, 0, 0, 0)).min(Vector4d(1, 1, 1, 1));

    return Colorf(float(clamped.x), float(clamped.y), float(clamped.z), float(clamped.w));
}
void test_mapped_matrix()
{
  for(int i = 0; i < g_repeat; i++) {
    CALL_SUBTEST_1( map_class_vector(Matrix<float, 1, 1>()) );
    CALL_SUBTEST_1( check_const_correctness(Matrix<float, 1, 1>()) );
    CALL_SUBTEST_2( map_class_vector(Vector4d()) );
    CALL_SUBTEST_2( map_class_vector(VectorXd(13)) );
    CALL_SUBTEST_2( check_const_correctness(Matrix4d()) );
    CALL_SUBTEST_3( map_class_vector(RowVector4f()) );
    CALL_SUBTEST_4( map_class_vector(VectorXcf(8)) );
    CALL_SUBTEST_5( map_class_vector(VectorXi(12)) );
    CALL_SUBTEST_5( check_const_correctness(VectorXi(12)) );

    CALL_SUBTEST_1( map_class_matrix(Matrix<float, 1, 1>()) );
    CALL_SUBTEST_2( map_class_matrix(Matrix4d()) );
    CALL_SUBTEST_11( map_class_matrix(Matrix<float,3,5>()) );
    CALL_SUBTEST_4( map_class_matrix(MatrixXcf(internal::random<int>(1,10),internal::random<int>(1,10))) );
    CALL_SUBTEST_5( map_class_matrix(MatrixXi(internal::random<int>(1,10),internal::random<int>(1,10))) );

    CALL_SUBTEST_6( map_static_methods(Matrix<double, 1, 1>()) );
    CALL_SUBTEST_7( map_static_methods(Vector3f()) );
    CALL_SUBTEST_8( map_static_methods(RowVector3d()) );
    CALL_SUBTEST_9( map_static_methods(VectorXcd(8)) );
    CALL_SUBTEST_10( map_static_methods(VectorXf(12)) );
    CALL_SUBTEST_11( map_not_aligned_on_scalar<double>() );
  }
}
Vector PickObjectTool::GetWorldCoordinates(BaseDraw* bd, const Matrix4d& m, Float x, Float y, Float z)
{
    // m is the view-projection matrix returned by pick object; it transforms a
    // point from camera space into clip space.
    Int32 l, t, r, b, w, h;
    Vector4d pos;
    Vector posWorld;

    bd->GetFrame(&l, &t, &r, &b);
    if (l == r || b == t)
        return Vector(0.0);

    w = r - l;
    h = b - t;

    // First, transform the point into clip space.
    pos.x = (x - Float(l)) / Float(w);
    pos.y = (y - Float(t)) / Float(h);
    pos.z = z;
    pos.w = 1.0;
    pos = pos * 2.0f - Vector4d(1.0f);
    pos.y = -pos.y;

    // Apply the inverse view-projection transform.
    Matrix4d im = !m;
    pos = im * pos;
    pos.MakeVector3(); // convert it into a 3-tuple

    posWorld = bd->GetMg() * GetVector3(pos);
    return posWorld;
}
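// Minimal Eigen-based sketch of the same unprojection idea used in
// GetWorldCoordinates above: map a screen pixel to clip space, multiply by the
// inverse view-projection matrix, then divide by w. The Eigen types, the
// function name, and the explicit w-divide are assumptions, not the SDK types
// used above.
#include <Eigen/Dense>

Eigen::Vector3d unprojectPixel(const Eigen::Matrix4d& viewProj,
                               double x, double y, double z,
                               double left, double top, double width, double height)
{
    // Normalize the pixel to [0,1], then remap to clip space [-1,1] with y flipped.
    Eigen::Vector4d clip((x - left) / width  * 2.0 - 1.0,
                         -((y - top) / height * 2.0 - 1.0),
                         z, 1.0);

    // Apply the inverse view-projection transform and perform the w-divide.
    Eigen::Vector4d world = viewProj.inverse() * clip;
    return world.head<3>() / world.w();
}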
void test_nullary()
{
  CALL_SUBTEST_1( testMatrixType(Matrix2d()) );
  CALL_SUBTEST_2( testMatrixType(MatrixXcf(internal::random<int>(1,300),internal::random<int>(1,300))) );
  CALL_SUBTEST_3( testMatrixType(MatrixXf(internal::random<int>(1,300),internal::random<int>(1,300))) );

  for(int i = 0; i < g_repeat; i++) {
    CALL_SUBTEST_4( testVectorType(VectorXd(internal::random<int>(1,300))) );
    CALL_SUBTEST_5( testVectorType(Vector4d()) );  // regression test for bug 232
    CALL_SUBTEST_6( testVectorType(Vector3d()) );
    CALL_SUBTEST_7( testVectorType(VectorXf(internal::random<int>(1,300))) );
    CALL_SUBTEST_8( testVectorType(Vector3f()) );
    CALL_SUBTEST_8( testVectorType(Vector4f()) );
    CALL_SUBTEST_8( testVectorType(Matrix<float,8,1>()) );
    CALL_SUBTEST_8( testVectorType(Matrix<float,1,1>()) );
    CALL_SUBTEST_9( testVectorType(VectorXi(internal::random<int>(1,300))) );
    CALL_SUBTEST_9( testVectorType(Matrix<int,1,1>()) );
  }

#ifdef EIGEN_TEST_PART_6
  // Assignment of a RowVectorXd to a MatrixXd (regression test for bug #79).
  VERIFY( (MatrixXd(RowVectorXd::LinSpaced(3, 0, 1)) - RowVector3d(0, 0.5, 1)).norm() < std::numeric_limits<double>::epsilon() );
#endif
}
void teleop_gui_t::render_link(kinematics::Skeleton* robot, kinematics::BodyNode *link,
                               const robot::robot_state_t& state, Eigen::Vector4d color,
                               bool use_default_color, bool draw_limits, int target_joint)
{
    if(!link)
        return;

    glPushMatrix();

    // Do self transform
    kinematics::Joint* joint = link->getParentJoint();
    for(int i=0; i < joint->getNumTransforms(); ++i) {
        joint->getTransform(i)->applyGLTransform(mRI);
    }

    kinematics::Shape* shape = link->getVisualizationShape();
    if(shape && !draw_limits) {
        glPushMatrix();
        shape->draw(mRI, color, use_default_color);
        glPopMatrix();
    }
    if(shape && draw_limits) {
        render_limits(robot, link, state, color, Vector4d(1,0,0,color[3]));
    }

    // Render subtree
    for(int i=0; i < link->getNumChildJoints(); ++i) {
        render_link(robot, link->getChildJoint(i)->getChildNode(), state, color,
                    use_default_color, draw_limits, target_joint);
    }

    glPopMatrix();
}
Vector4d Triangle::normal(Vector4d x, Vector4d y, Vector4d z) const
{
    Vector4d edge1 = y - x;
    Vector4d edge2 = z - x;

    Vector3d e1 = edge1.head(3);
    Vector3d e2 = edge2.head(3);
    Vector3d result = e1.cross(e2);

    return Vector4d(result(0), result(1), result(2), 0);
}
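// Quick self-contained check of the cross-product normal above (assuming Eigen
// types): for a unit right triangle in the z = 0 plane the edge cross product
// is exactly (0, 0, 1). The function name is an assumption.
#include <cassert>
#include <Eigen/Dense>

void checkTriangleNormal()
{
    using Eigen::Vector3d;
    Vector3d a(0, 0, 0), b(1, 0, 0), c(0, 1, 0);

    // Same construction as Triangle::normal: edge1 x edge2, not normalized.
    Vector3d n = (b - a).cross(c - a);
    assert(n.isApprox(Vector3d(0, 0, 1)));
}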
Vector4d operator-(const Vector4d & _vec1, const Vector4d & _vec2)
{
    Vector4d newvec = Vector4d();
    newvec.data[0] = _vec1[0] - _vec2[0];
    newvec.data[1] = _vec1[1] - _vec2[1];
    newvec.data[2] = _vec1[2] - _vec2[2];
    newvec.data[3] = _vec1[3] - _vec2[3];
    return newvec;
}
Vector4d operator*(const Vector4d& _vec, const double numb)
{
    Vector4d newvec = Vector4d();
    newvec.data[0] = _vec[0] * numb;
    newvec.data[1] = _vec[1] * numb;
    newvec.data[2] = _vec[2] * numb;
    newvec.data[3] = _vec[3] * numb;
    return newvec;
}
Matrix4d Sim3::hat(const Vector7d & v)
{
    Matrix4d Omega;
    Omega.topLeftCorner<3,3>() = ScSO3::hat(v.tail<4>());
    Omega.col(3).head<3>() = v.head<3>();
    Omega.row(3) = Vector4d(0., 0., 0., 0.);
    return Omega;
}
void Build_CAM4D_Matrix_UVN(CAM4D_PTR cam, int mode)
{
    Matrix4d mt_inv, mt_uvn;

    // Inverse translation: move the camera position to the origin.
    mt_inv << 1, 0, 0, 0,
              0, 1, 0, 0,
              0, 0, 1, 0,
              -cam->pos(0), -cam->pos(1), -cam->pos(2), 1;

    if (mode == UVN_MODE_SPHERICAL) {
        float phi   = cam->dir(0);
        float theta = cam->dir(1);

        float sin_phi = Fast_Sin(phi);
        float cos_phi = Fast_Cos(phi);
        float sin_theta = Fast_Sin(theta);
        float cos_theta = Fast_Cos(theta);

        cam->target(0) = -1 * sin_phi * sin_theta;
        cam->target(1) =  1 * cos_phi;
        cam->target(2) =  1 * sin_phi * cos_theta;
    }

    cam->n = cam->target - cam->pos;

    Vector3d t1, t2, t3;
    t1 = Vector3d(cam->n(0), cam->n(1), cam->n(2));
    t2 = Vector3d(0, 1, 0);
    t3 = t2.cross(t1);
    t2 = t1.cross(t3);

    cam->u = Vector4d(t3(0), t3(1), t3(2), 1);
    cam->v = Vector4d(t2(0), t2(1), t2(2), 1);
    cam->u.normalize();
    cam->v.normalize();
    cam->n.normalize();

    mt_uvn << cam->u(0), cam->v(0), cam->n(0), 0,
              cam->u(1), cam->v(1), cam->n(1), 0,
              cam->u(2), cam->v(2), cam->n(2), 0,
              0, 0, 0, 1;

    cam->mcam = mt_inv * mt_uvn;
}
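// Hedged Eigen sketch of the UVN basis construction used in
// Build_CAM4D_Matrix_UVN: n points from the eye to the target, u comes from a
// world "up" hint crossed with n, and v completes the basis. This is a
// simplified 3x3 version; the original stores u/v/n as 4D vectors. Names and
// types here are assumptions.
#include <Eigen/Dense>

Eigen::Matrix3d uvnBasis(const Eigen::Vector3d& eye, const Eigen::Vector3d& target)
{
    Eigen::Vector3d n = (target - eye).normalized();                    // view direction
    Eigen::Vector3d u = Eigen::Vector3d::UnitY().cross(n).normalized(); // camera "right"
    Eigen::Vector3d v = n.cross(u);                                     // recomputed "up"

    Eigen::Matrix3d basis;
    basis.col(0) = u;
    basis.col(1) = v;
    basis.col(2) = n;
    return basis;
}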
Vector4d operator*(const Matrix4x4& matrix, const Vector4d& vector)
{
    return Vector4d(
        matrix(0, 0) * vector[0] + matrix(0, 1) * vector[1] + matrix(0, 2) * vector[2] + matrix(0, 3) * vector[3],
        matrix(1, 0) * vector[0] + matrix(1, 1) * vector[1] + matrix(1, 2) * vector[2] + matrix(1, 3) * vector[3],
        matrix(2, 0) * vector[0] + matrix(2, 1) * vector[1] + matrix(2, 2) * vector[2] + matrix(2, 3) * vector[3],
        matrix(3, 0) * vector[0] + matrix(3, 1) * vector[1] + matrix(3, 2) * vector[2] + matrix(3, 3) * vector[3]
    );
}
Vector4d Vector4d::operator-(Vector4d & another)
{
    // Subtract in Cartesian space (after the perspective divide), then scale the
    // result back by this vector's w so the difference stays in homogeneous form.
    GLdouble x = v[0] / v[3];
    GLdouble y = v[1] / v[3];
    GLdouble z = v[2] / v[3];
    GLdouble w = v[3];

    x = (x - another[0] / another[3]) * w;
    y = (y - another[1] / another[3]) * w;
    z = (z - another[2] / another[3]) * w;

    return Vector4d(x, y, z, w);
}
Camera::Camera(Camera::CameraType t, const Vector4d &p, const Vector4d &d, const Vector4d &uvnTarget,
               float nearZ, float farZ, float fovAngle, float width, float height)
    :_type(t)
    ,_pos(p)
    ,_direction(d)
    ,_target(uvnTarget)
    ,_nearClipZ(nearZ)
    ,_farClipZ(farZ)
    ,_fov(fovAngle)
    ,_viewportWidth(width)
    ,_viewportHeight(height)
    ,u(Vector4d(1, 0, 0, 0))
    ,v(Vector4d(0, 1, 0, 0))
    ,n(Vector4d(0, 0, 1, 0))
    ,_worldToCam(Matrix4d::IDENTITY)
    ,_camToProj(Matrix4d::IDENTITY)
    ,_projToView(Matrix4d::IDENTITY)
{
    _viewportCenterX = (_viewportWidth - 1) / 2;
    _viewportCenterY = (_viewportHeight - 1) / 2;
    _aspectRadio = _viewportWidth / _viewportHeight;

    _viewPlaneWidth = 2;
    _viewPlaneHeight = 2 / _aspectRadio;

    float tanFovDiv2 = tan(DEG_TO_RAD(_fov / 2));
    _viewDishH = _viewDish = 0.5 * _viewPlaneWidth * tanFovDiv2;
    _viewDishV = 0.5 * _viewPlaneHeight * tanFovDiv2;

    if (_fov == 90.0) {
        _rightClipPlane.normal = Vector3d(1, 0, -1);
        _leftClipPlane.normal  = Vector3d(-1, 0, -1);
        _upClipPlane.normal    = Vector3d(0, 1, -1);
        _downClipPlane.normal  = Vector3d(0, -1, -1);
    } else {
        _rightClipPlane.normal = Vector3d(_viewDish, 0, -_viewPlaneWidth / 2);
        _leftClipPlane.normal  = Vector3d(-_viewDish, 0, -_viewPlaneWidth / 2);
        _upClipPlane.normal    = Vector3d(0, _viewDish, -_viewPlaneWidth / 2);
        _downClipPlane.normal  = Vector3d(0, -_viewDish, -_viewPlaneWidth / 2);
    }
}
void test_stable_norm()
{
  for(int i = 0; i < g_repeat; i++) {
    CALL_SUBTEST_1( stable_norm(Matrix<float, 1, 1>()) );
    CALL_SUBTEST_2( stable_norm(Vector4d()) );
    CALL_SUBTEST_3( stable_norm(VectorXd(internal::random<int>(10,2000))) );
    CALL_SUBTEST_4( stable_norm(VectorXf(internal::random<int>(10,2000))) );
    CALL_SUBTEST_5( stable_norm(VectorXcd(internal::random<int>(10,2000))) );
  }
}
bool Camera::nearClippingPlane(Vector3d *normal, Vector3d *point)
{
    // Determine the near plane from three coplanar points
    // (http://www.songho.ca/opengl/gl_projectionmatrix.html is a
    // helpful resource here).
    //
    // We will convert the following points (which lie in the near plane)
    // from NDC coordinates to object coordinates:
    //
    //   (-1, -1, -1), (1, -1, -1), and (-1, 1, -1).
    //
    // First get the current transformation matrix (T = PM, P is the
    // projection matrix, M is the modelview matrix), which converts
    // object coordinates (O) to NDC coordinates (N) via:
    //
    //   N = T O
    //
    // These are stored in the private class; no need to query OpenGL
    // for them.
    const Matrix4d &proj = d->projection.matrix();
    const Matrix4d &modv = d->modelview.matrix();

    // Now invert the matrix so that we can find our three coplanar
    // points in object coordinates via:
    //
    //   O = Inv(T) N
    //
    // Calculate T ( = PM ) here, too:
    const Matrix4d invT((proj * modv).inverse());

    // Now to get three points and a normal vector:
    // (V4toV3DivW converts {x,y,z,w} to {x,y,z}/w)
    *point = V4toV3DivW(invT * Vector4d(-1, -1, -1, 1));
    const Vector3d p1(V4toV3DivW(invT * Vector4d( 1, -1, -1, 1)));
    const Vector3d p2(V4toV3DivW(invT * Vector4d(-1,  1, -1, 1)));

    // This cross product ensures that the normal points into the
    // viewing volume:
    *normal = (p2 - (*point)).cross(p1 - (*point)).normalized();

    return true;
}
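// Standalone Eigen sketch of the near-plane recovery described in the comments
// above: unproject three NDC corners of the near plane through inverse(P*M)
// and take a cross product for the normal. Function name, parameters, and the
// Eigen types are assumptions; the class members used above are replaced by
// explicit arguments.
#include <Eigen/Dense>

void nearPlaneFromMatrices(const Eigen::Matrix4d& proj, const Eigen::Matrix4d& modelview,
                           Eigen::Vector3d* point, Eigen::Vector3d* normal)
{
    const Eigen::Matrix4d invT = (proj * modelview).inverse();

    // Transform an NDC point to object coordinates and divide by w.
    auto toObject = [&invT](double x, double y, double z) {
        Eigen::Vector4d p = invT * Eigen::Vector4d(x, y, z, 1.0);
        return Eigen::Vector3d(p.head<3>() / p.w());
    };

    *point = toObject(-1, -1, -1);
    const Eigen::Vector3d p1 = toObject( 1, -1, -1);
    const Eigen::Vector3d p2 = toObject(-1,  1, -1);

    // Cross product oriented so the normal points into the viewing volume.
    *normal = (p2 - *point).cross(p1 - *point).normalized();
}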
Vector4d Vector4d::normalized() const
{
    double length = abs();
    return Vector4d(
        m_elements[0] / length,
        m_elements[1] / length,
        m_elements[2] / length,
        m_elements[3] / length
    );
}
TEST(cam_loader, loadCameraFromFile) {
  Camera c = loadCameraFromFile("cam_file.cam");

  // location
  EXPECT_FLOAT_EQ(c.position.orig[0], 4.0);
  EXPECT_FLOAT_EQ(c.position.orig[1], 5.0);
  EXPECT_FLOAT_EQ(c.position.orig[2], 6.0);

  // direction
  Vector4d expected = Vector4d(1.0, 0.5, 1.1, 0.0);
  expected.normalize();
  EXPECT_LT((c.position.dir - expected).norm(), 0.00001) << "direction vector";

  // up direction
  Vector4d expected_up = Vector4d(1, 2, 3, 0).normalized();
  EXPECT_FLOAT_EQ((c.up - expected_up).norm(), 0.0) << "up vector";

  // dimensions
  EXPECT_DOUBLE_EQ(c.worldWidth, 10) << "world width";
  EXPECT_DOUBLE_EQ(c.worldHeight, 20) << "world height";
}
Vector4d CProxyCamera::unproject(const Vector3d& icoord) const
{
    const Matrix3d &A = m_KR;
    Vector3d b(icoord[0], icoord[1], icoord[2]);
    for (int y = 0; y < 3; ++y) {
        b[y] -= m_KT[y];
    }
    Matrix3d IA = A.inverse();
    Vector3d x = IA * b;
    return Vector4d(x[0], x[1], x[2], 1.0f);
}
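// Hedged sketch of the same back-projection math as CProxyCamera::unproject,
// written against plain Eigen types: solve KR * X = icoord - KT for X and
// return the homogeneous point. The m_KR/m_KT members are replaced by explicit
// parameters, and the function name is an assumption.
#include <Eigen/Dense>

Eigen::Vector4d backProject(const Eigen::Matrix3d& KR, const Eigen::Vector3d& KT,
                            const Eigen::Vector3d& icoord)
{
    const Eigen::Vector3d x = KR.inverse() * (icoord - KT);
    return Eigen::Vector4d(x.x(), x.y(), x.z(), 1.0);
}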
Vector4d CProxyCamera::intersect(const Vector4d &coord, const Vector4d& abcd) const
{
    Vector4d ray = m_center - coord;

    const double A = coord.dot(abcd);
    const double B = ray.dot(abcd);

    if (fabs(B) < 1e-8)
        return Vector4d(0.0f, 0.0f, 0.0f, -1.0f);
    else
        return coord - A / B * ray;
}
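// Sketch of the homogeneous plane intersection used above (assuming Eigen
// types): for a point C, a ray direction R, and a plane abcd = (a,b,c,d), the
// intersection is C - (C.abcd / R.abcd) * R; a near-zero denominator means the
// ray is parallel to the plane. This variant reports failure via a bool
// instead of the w = -1 sentinel used above.
#include <cmath>
#include <Eigen/Dense>

bool intersectPlane(const Eigen::Vector4d& coord, const Eigen::Vector4d& ray,
                    const Eigen::Vector4d& abcd, Eigen::Vector4d* out)
{
    const double A = coord.dot(abcd);
    const double B = ray.dot(abcd);
    if (std::fabs(B) < 1e-8)
        return false;                 // parallel: no intersection
    *out = coord - (A / B) * ray;
    return true;
}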
TEST(HitPointInterval, ShouldTransformInterval) {
  HitPointInterval interval;
  HitPoint hitPoint1(box, 2, Vector3d(1, 0, 0), Vector3d(0, 1, 0));
  HitPoint hitPoint2(box, 3, Vector3d(2, 0, 0), Vector3d(0, 1, 0));
  interval.add(hitPoint1, hitPoint2);

  Matrix4d pointMatrix = Matrix3d::rotateZ(1_radians);
  Matrix3d normalMatrix = Matrix3d::rotateX(1_radians);
  HitPointInterval transformed = interval.transform(pointMatrix, normalMatrix);

  ASSERT_EQ(Vector3d(pointMatrix * Vector4d(1, 0, 0)), transformed.min().point());
}
// initializes the rays that have previously been declared
void Camera::generateImagePlane() {
    // generate u,v,w
    // set w to be exactly opposite of the viewing direction, and make sure it is normalized
    wVec = -d.normalize();
    // create a temporary vector for 'up'
    Vector4d vTemp(0, 1, 0);
    // set u to be perpendicular to w and the 'up' vector
    uVec = vTemp.crossProduct(wVec).normalize();
    // finally, set v to be perpendicular to both u and w. This should be roughly
    // representative of the 'up' vector, but part of the orthonormal basis.
    vVec = wVec.crossProduct(uVec).normalize();

    // compute l, r, b, & t
    // calculate modifier for field of view first...
    float pi = std::atan(1.0f) * 4.0f;
    float fovMod = tan(fov * pi / (2 * 180));
    //cout << "Fov Mod = " << fovMod << endl;
    r = ((float)width / (float)height) * fovMod * (focalLength);
    l = -r;
    t = fovMod * (focalLength);
    b = -t;

    // declare u & v, the pixel locations for the current pixel
    float u, v;
    Vector4d tempVector;

    srand48(time(0));
    for (int heightCounter = 0; heightCounter < height * sampleMultiplier; heightCounter++) {
        for (int widthCounter = 0; widthCounter < width * sampleMultiplier; widthCounter++) {
            u = l + (r - l) * (widthCounter + (drand48() - 0.5)) / (width * sampleMultiplier);
            v = b + (t - b) * (heightCounter + (drand48() - 0.5)) / (height * sampleMultiplier);

            // set ray direction
            tempVector = Vector4d(wVec * (-focalLength) + uVec * u + vVec * v).normalize();
            // set ray origin
            rays[width * sampleMultiplier * heightCounter + widthCounter] = Ray4d(e, tempVector);
            rays[width * sampleMultiplier * heightCounter + widthCounter].sampleX = heightCounter % sampleMultiplier;
            rays[width * sampleMultiplier * heightCounter + widthCounter].sampleY = widthCounter % sampleMultiplier;
            rays[width * sampleMultiplier * heightCounter + widthCounter].maxSampleX = sampleMultiplier;
            rays[width * sampleMultiplier * heightCounter + widthCounter].maxSampleY = sampleMultiplier;
        }
    }
}
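// Small Eigen-based sketch of the image-plane setup used above: compute the
// right/top extents from the field of view, aspect ratio, and focal length,
// then form one ray direction through a pixel sample. The function name, the
// Eigen types, and the center-of-pixel sampling (the original jitters with
// drand48()) are assumptions.
#include <cmath>
#include <Eigen/Dense>

Eigen::Vector3d rayThroughPixel(const Eigen::Vector3d& uVec, const Eigen::Vector3d& vVec,
                                const Eigen::Vector3d& wVec, double fovDegrees,
                                double focalLength, int width, int height,
                                int px, int py)
{
    const double pi = std::atan(1.0) * 4.0;
    const double fovMod = std::tan(fovDegrees * pi / (2.0 * 180.0));

    // Image-plane extents, as in the original: r scales with the aspect ratio.
    const double r = (double(width) / double(height)) * fovMod * focalLength;
    const double t = fovMod * focalLength;
    const double l = -r, b = -t;

    // Center-of-pixel sample coordinates on the image plane.
    const double u = l + (r - l) * (px + 0.5) / width;
    const double v = b + (t - b) * (py + 0.5) / height;

    return (-focalLength * wVec + u * uVec + v * vVec).normalized();
}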
void Camera::createUVNMatrix(Camera::UVNMode mode)
{
    Matrix4d t(1, 0, 0, 0,
               0, 1, 0, 0,
               0, 0, 1, 0,
               -_pos.x, -_pos.y, -_pos.z, 1);

    if (mode == UVN_MODE_SPHERICAL) {
        float phi = _direction.x;    // elevation angle
        float theta = _direction.y;  // azimuth angle
        float sinPhi = sin(phi);
        float cosPhi = cos(phi);
        float sinTheta = sin(theta);
        float cosTheta = cos(theta);

        _target.x = -sinPhi * sinTheta;
        _target.y = cosPhi;
        _target.z = sinPhi * cosTheta;
    }

    n = Vector4d(_pos, _target);
    v = Vector4d(0, 1, 0, 0);
    u = v.cross(n);
    v = n.cross(u);
    u.normalize();
    v.normalize();
    n.normalize();

    Matrix4d r(u.x, v.x, n.x, 0,
               u.y, v.y, n.y, 0,
               u.z, v.z, n.z, 0,
               0, 0, 0, 1);

    _worldToCam = t * r;
}
Vector4d Vector4d::homogenized() const
{
    if (m_elements[3] != 0) {
        return Vector4d(
            m_elements[0] / m_elements[3],
            m_elements[1] / m_elements[3],
            m_elements[2] / m_elements[3],
            1
        );
    } else {
        return Vector4d(
            m_elements[0],
            m_elements[1],
            m_elements[2],
            m_elements[3]
        );
    }
}
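// Equivalent operation written with Eigen (an assumption about the intent of
// homogenized() above): hnormalized() divides by the last coefficient and
// homogeneous() appends a 1, so a vector with w != 0 maps to its w = 1
// representative, while w == 0 (a point at infinity / direction) is returned
// unchanged.
#include <Eigen/Dense>

Eigen::Vector4d homogenize(const Eigen::Vector4d& p)
{
    if (p.w() != 0.0)
        return p.hnormalized().homogeneous();   // (x/w, y/w, z/w, 1)
    return p;                                    // directions stay as-is
}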
void test_nullary()
{
  CALL_SUBTEST_1( testMatrixType(Matrix2d()) );
  CALL_SUBTEST_2( testMatrixType(MatrixXcf(internal::random<int>(1,300),internal::random<int>(1,300))) );
  CALL_SUBTEST_3( testMatrixType(MatrixXf(internal::random<int>(1,300),internal::random<int>(1,300))) );

  for(int i = 0; i < g_repeat; i++) {
    CALL_SUBTEST_4( testVectorType(VectorXd(internal::random<int>(1,300))) );
    CALL_SUBTEST_5( testVectorType(Vector4d()) );  // regression test for bug 232
    CALL_SUBTEST_6( testVectorType(Vector3d()) );
    CALL_SUBTEST_7( testVectorType(VectorXf(internal::random<int>(1,300))) );
    CALL_SUBTEST_8( testVectorType(Vector3f()) );
    CALL_SUBTEST_8( testVectorType(Matrix<float,1,1>()) );
  }
}
void Depth::setDepthAt(const Vector2d &pix, double v, const bool over_write)
{
    // Bilinearly distribute the new sample over the four surrounding pixels.
    int xl = floor(pix[0]), xh = round(pix[0] + 0.5);
    int yl = floor(pix[1]), yh = round(pix[1] + 0.5);
    double lm = pix[0] - (double) xl, rm = (double) xh - pix[0];
    double tm = pix[1] - (double) yl, bm = (double) yh - pix[1];
    Vector4d w(rm * bm, lm * bm, lm * tm, rm * tm);

    Vector4d w_ori(getWeightAt(xl, yl), getWeightAt(xh, yl), getWeightAt(xh, yh), getWeightAt(xl, yh));
    if (over_write) {
        w_ori = Vector4d(0, 0, 0, 0);
    }

    Vector4d ind(xl + yl * getWidth(), xh + yl * getWidth(), xh + yh * getWidth(), xl + yh * getWidth());
    for (int i = 0; i < 4; i++) {
        if (w_ori[i] + w[i] != 0) {
            // Blend the new value into the running weighted average for this pixel.
            data[ind[i]] = (data[ind[i]] * w_ori[i] + v * w[i]) / (w_ori[i] + w[i]);
            setWeightAt(ind[i], w_ori[i] + w[i]);
        }
    }
}
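// Sketch of the bilinear weighting used in setDepthAt (assuming Eigen types
// and a hypothetical helper name): the four corner weights for a sample inside
// a unit pixel cell sum to 1, since (lm + rm) = 1 and (tm + bm) = 1.
#include <cmath>
#include <Eigen/Dense>

Eigen::Vector4d bilinearWeights(const Eigen::Vector2d& pix)
{
    const int xl = int(std::floor(pix[0])), xh = xl + 1;
    const int yl = int(std::floor(pix[1])), yh = yl + 1;
    const double lm = pix[0] - xl, rm = xh - pix[0];
    const double tm = pix[1] - yl, bm = yh - pix[1];
    // Corner order matches the original: (xl,yl), (xh,yl), (xh,yh), (xl,yh).
    return Eigen::Vector4d(rm * bm, lm * bm, lm * tm, rm * tm);
}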
void CSimuVertexRingObj::updateRotationQuaternionForAllElements(const unsigned int tm, const bool needQuat)
{
    // Compute the rotation for each vertex.
    const int BUFFERLEN = 20000;
    int failbuffer[BUFFERLEN];
    int i, c = 0;
    assert(m_nVRingElementCount == m_nVertexCount);
    const Vector3d *pVertex = &m_pVertInfo[0].m_pos;
    const int stride = sizeof(CSimuEntity::VertexInfo);

    for (i = 0; i < m_nVRingElementCount; i++) {
        CVertexRingElement& elm = m_pVRingElement[i];
        if (!elm.computeRotationQuaternionSolidOrShell(0, pVertex, stride)) {
            failbuffer[c++] = i;
            elm.m_rotTime = 0;
        }
        else {
            elm.m_rotTime = tm;
        }
    }

    // Fix up the failed vertices by averaging the quaternions of neighbouring
    // ring vertices that were successfully updated this time step.
    assert(c < BUFFERLEN);
    for (i = 0; i < c; i++) {
        const int vid = failbuffer[i];
        CVertexRingElement& elm = m_pVRingElement[vid];
        Vector4d q(0, 0, 0, 0);
        int count = 0;
        for (int j = 0; j < elm.getRodNumber(); j++) {
            const int v = elm.m_pVertexRingNode[j].m_nVertexID;
            CVertexRingElement& elm1 = m_pVRingElement[v];
            const unsigned int tm1 = elm1.m_rotTime;
            if (tm1 == tm) {
                q += elm1.m_quat;
                count++;
            }
        }
        if (count > 0) {
            Quaternion qq(q[0], q[1], q[2], q[3]);
            qq.normalize();
            elm.m_quat = Vector4d(qq[0], qq[1], qq[2], qq[3]);
        }
    }
}
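// Hedged sketch of the neighbour-averaging fallback above, using
// Eigen::Quaterniond instead of the engine's Quaternion/Vector4d types:
// summing the neighbours' quaternion coefficients and renormalizing gives an
// approximate average rotation, which is only reasonable when the rotations
// are close together (as they are for adjacent ring vertices). The function
// name is an assumption; an empty input is not handled here.
#include <vector>
#include <Eigen/Dense>
#include <Eigen/Geometry>

Eigen::Quaterniond averageNearbyQuaternions(const std::vector<Eigen::Quaterniond>& qs)
{
    Eigen::Vector4d sum = Eigen::Vector4d::Zero();
    for (const Eigen::Quaterniond& q : qs)
        sum += q.coeffs();                                      // coeffs() is (x, y, z, w)

    Eigen::Quaterniond avg(sum(3), sum(0), sum(1), sum(2));     // constructor takes (w, x, y, z)
    avg.normalize();
    return avg;
}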
void Remove_Backfaces_OBJECT4D(OBJECT4D_PTR obj, CAM4D_PTR cam)
{
    if (obj->state & OBJECT4D_STATE_CULLED)
        return;

    int count = 0;
    int count2 = 0;

    for (int poly = 0; poly < obj->num_polys; poly++) {
        POLY4D_PTR curr_poly = &obj->plist[poly];

        if (!(curr_poly->state & POLY4D_STATE_ACTIVE) ||
            (curr_poly->state & POLY4D_STATE_CLIPPED) ||
            (curr_poly->attr & POLY4D_ATTR_2SIDED) ||
            (curr_poly->state & POLY4D_STATE_BACKFACE)) {
            continue;
        }
        count2++;

        int vindex0 = curr_poly->vert[0];
        int vindex1 = curr_poly->vert[1];
        int vindex2 = curr_poly->vert[2];

        Vector4d t;
        Vector3d u, v, n;

        t = obj->vlist_trans[vindex0] - obj->vlist_trans[vindex1];
        u = Vector3d(t(0), t(1), t(2));
        t = obj->vlist_trans[vindex0] - obj->vlist_trans[vindex2];
        v = Vector3d(t(0), t(1), t(2));
        n = u.cross(v);

        Vector4d view;
        //view = obj->vlist_trans[vindex0] - cam->pos;
        view = Vector4d(0, 0, 1, 0);

        float dp = n(0)*view(0) + n(1)*view(1) + n(2)*view(2);
        if (dp >= 0.0) {
            curr_poly->state |= POLY4D_STATE_BACKFACE;
        }
    }
}
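// Minimal Eigen sketch of the backface test above: a polygon is flagged as a
// backface when the dot product of its face normal with the view vector is
// non-negative. The fixed (0, 0, 1) view vector mirrors the hard-coded value
// used above; the function name and Eigen types are assumptions.
#include <Eigen/Dense>

bool isBackface(const Eigen::Vector3d& v0, const Eigen::Vector3d& v1, const Eigen::Vector3d& v2)
{
    const Eigen::Vector3d n = (v0 - v1).cross(v0 - v2);  // same edge order as above
    const Eigen::Vector3d view(0, 0, 1);
    return n.dot(view) >= 0.0;
}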
void makeCube(std::vector<Vector3d>* vertices, std::vector<Vector4d>* colors,
              std::vector<Vector2d>* textures, std::vector<size_t>* triangles)
{
    vertices->resize(numVertices);
    colors->resize(numVertices);
    textures->resize(numVertices);

    double scale = 1.0;
    for (int i = 0; i < numVertices; ++i) {
        (*vertices)[i] = Vector3d(vertexData[3*i]*scale, vertexData[3*i+1]*scale, vertexData[3*i+2]*scale);
        (*colors)[i] = Vector4d(colorData[4*i], colorData[4*i+1], colorData[4*i+2], colorData[4*i+3]);
        (*textures)[i] = Vector2d(textureData[2*i], textureData[2*i+1]);
    }

    triangles->resize(numTriangles*3);
    std::copy(triangleData, triangleData + 12*3, std::begin(*triangles));
}