Example 1
void Plane::MakeGizmo()
{
	float lineSegmentLength = 300;
	glm::vec3 planeNormal(normal.x, normal.y, normal.z);
	glm::vec3 parallel1(0, 0, 0);
	glm::vec3 parallel2(0, 0, 0);


	if (planeNormal == glm::vec3(0, 0, 1))
		parallel1 = glm::vec3(0, 1, 0);
	else
		parallel1 = glm::vec3(0, 0, 1);

	//get vectors that are perpendicular to the plane
	parallel1 = glm::cross(planeNormal, parallel1);
	parallel2 = glm::cross(planeNormal, parallel1);

	glm::vec3 centrePoint = planeNormal * normal.w;
	parallel1 = glm::normalize(parallel1);
	parallel2 = glm::normalize(parallel2);

	glm::vec3 start = centrePoint + parallel1 * lineSegmentLength;
	glm::vec3 end = centrePoint - parallel1 * lineSegmentLength;
	Gizmos::addLine(start, end, color);

	start = centrePoint + parallel2 * lineSegmentLength;
	end = centrePoint - parallel2 * lineSegmentLength;
	Gizmos::addLine(start, end, color);
}
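Example 1 picks a helper axis, crosses it with the plane normal twice, and normalizes the results to get two in-plane directions for the gizmo lines. A minimal standalone sketch of the same idea, assuming GLM (not part of the original Plane class):

// Sketch (not from the original source, assuming GLM): build an orthonormal
// basis {t1, t2} spanning the plane with unit normal n.
#include <cmath>
#include <glm/glm.hpp>

void planeBasis(const glm::vec3& n, glm::vec3& t1, glm::vec3& t2)
{
	// Pick a helper axis that cannot be parallel to n.
	glm::vec3 helper = std::fabs(n.z) < 0.9f ? glm::vec3(0, 0, 1) : glm::vec3(0, 1, 0);
	t1 = glm::normalize(glm::cross(n, helper));
	t2 = glm::normalize(glm::cross(n, t1));
}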
//----------------------------------------------------------------------------
void vesKiwiImageWidgetRepresentation::scrollImageSlice(double deltaX, double deltaY)
{
  deltaY *= -1;

  vesSharedPtr<vesRenderer> ren = this->renderer();
  vesVector3f viewFocus = ren->camera()->focalPoint();
  vesVector3f viewFocusDisplay = ren->computeWorldToDisplay(viewFocus);
  float focalDepth = viewFocusDisplay[2];

  this->Internal->LastTouchPosition += vesVector2f(deltaX, deltaY);
  vesVector3f worldTouchPosition = ren->computeDisplayToWorld(vesVector3f(this->Internal->LastTouchPosition[0], this->Internal->LastTouchPosition[1], focalDepth));
  worldTouchPosition -= this->Internal->GrabOffset.cast<float>();

  int flatDimension = this->Internal->SelectedImageDimension;
  vesVector3f planeNormal(0, 0, 0);
  planeNormal[flatDimension] = 1.0;

  int extent[6];
  vesVector3d spacing;
  vesVector3d origin;
  this->imageData()->GetOrigin(origin.data());
  this->imageData()->GetSpacing(spacing.data());
  this->imageData()->GetExtent(extent);

  double distanceAlongInteractionAxis = worldTouchPosition.dot(planeNormal);
  double distanceFromOriginAlongAxis = distanceAlongInteractionAxis - origin[flatDimension];
  int slicesFromOrigin = distanceFromOriginAlongAxis / spacing[flatDimension] - extent[flatDimension*2]*spacing[flatDimension];

  this->scheduleSetSliceIndex(flatDimension, slicesFromOrigin);
}
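scrollImageSlice converts the dragged display position back to world space and projects it onto the slicing axis. For reference, the usual world-coordinate-to-slice-index conversion divides the offset from the image origin by the spacing along that axis; a small illustrative sketch (plain types, not the VES/VTK API):

// Sketch (illustrative, not the VES/VTK API): map a world coordinate along
// one image axis to a slice index, given origin, spacing, and minimum extent.
int worldToSliceIndex(double worldCoord, double origin, double spacing, int extentMin)
{
  return static_cast<int>((worldCoord - origin) / spacing + 0.5) - extentMin;
}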
Example 3
void PerformanceTesting::Test(
	const std::string & cardir,
	const std::string & carname,
	ContentManager & content,
	std::ostream & info_output,
	std::ostream & error_output)
{
	info_output << "Beginning car performance test on " << carname << std::endl;

	// init track
	assert(!track);
	assert(!plane);
	btVector3 planeNormal(0, 0, 1);
	btScalar planeConstant = 0;
	plane = new btStaticPlaneShape(planeNormal, planeConstant);
	plane->setUserPointer(static_cast<void*>(&surface));
	track = new btCollisionObject();
	track->setCollisionShape(plane);
	track->setActivationState(DISABLE_SIMULATION);
	track->setUserPointer(static_cast<void*>(&surface));
	world.addCollisionObject(track);

	//load the car dynamics
	std::tr1::shared_ptr<PTree> cfg;
	content.load(cfg, cardir, carname + ".car");
	if (!cfg->size())
	{
		return;
	}

	// position is the center of a 2 x 4 x 1 meter box on track surface
	btVector3 pos(0.0, -2.0, 0.5);
	btQuaternion rot = btQuaternion::getIdentity();
	const std::string tire = "";
	const bool damage = false;
	if (!car.Load(*cfg, cardir, tire, pos, rot, damage, world, content, error_output))
	{
		return;
	}

	info_output << "Car dynamics loaded" << std::endl;
	info_output << carname << " Summary:\n" <<
			"Mass (kg) including driver and fuel: " << 1 / car.GetInvMass() << "\n" <<
			"Center of mass (m): " << car.GetCenterOfMass() << std::endl;

	std::ostringstream statestream;
	joeserialize::BinaryOutputSerializer serialize_output(statestream);
	if (!car.Serialize(serialize_output))
	{
		error_output << "Serialization error" << std::endl;
	}
	//else info_output << "Car state: " << statestream.str();
	carstate = statestream.str();

	TestMaxSpeed(info_output, error_output);
	TestStoppingDistance(false, info_output, error_output);
	TestStoppingDistance(true, info_output, error_output);

	info_output << "Car performance test complete." << std::endl;
}
Example 4
  cVECTOR3 tFACET<_Mesh_Entities>::Normal() const
  {
  /* BB * I don't see any division by zero. */

    cVECTOR3 planeNormal(0.0, 0.0, 0.0);
    const cHALF_EDGE* heInit = HalfEdge();
    const cHALF_EDGE* heCurr = heInit;
    const cHALF_EDGE* heNext = heCurr->Next();
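    // Accumulate the (unnormalized) facet normal using Newell's method.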

    do {
  	const cPOINT3& point0 = heCurr->Vertex()->Point();
  	const cPOINT3& point1 = heNext->Vertex()->Point();

    	for (INT i = 0 ; i < 3; i++){
    	  INT i1 = i + 1;
    	  if (i1 > 2) i1 -= 3;
    	  INT i2 = i + 2;
    	  if (i2 > 2) i2 -= 3;
    	  planeNormal[i] += (point0[i1]-point1[i1])*(point0[i2]+point1[i2]);
    	}

    	heCurr = heNext;
    	heNext = heNext->Next();
    } while(heCurr != heInit);

    return planeNormal;
  }
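The loop in Example 4 is Newell's method: it accumulates an area-weighted, unnormalized normal and also behaves reasonably for slightly non-planar facets. For a single triangle the same direction follows from one cross product; a small illustrative sketch for comparison (plain arrays, not the cPOINT3/cVECTOR3 types above):

// Sketch (illustrative, plain arrays): triangle normal via the cross product
// of two edge vectors; matches Newell's method in direction for a planar facet.
#include <array>

std::array<double, 3> triangleNormal(const double a[3], const double b[3], const double c[3])
{
    const double u[3] = { b[0] - a[0], b[1] - a[1], b[2] - a[2] };
    const double v[3] = { c[0] - a[0], c[1] - a[1], c[2] - a[2] };
    std::array<double, 3> n = { u[1]*v[2] - u[2]*v[1],
                                u[2]*v[0] - u[0]*v[2],
                                u[0]*v[1] - u[1]*v[0] };
    return n;
}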
Example 5
float MwMaths::PointToPlaneDistance(MwVector3 &point, MwVector4 &plane)
{
	// TODO: sophisticate the straightforward division by plane.z, make conversion functions for plane of Vector3s and of Vector4
	MwVector3 planePoint(0.0f, 0.0f, -plane.w / plane.z);
    MwVector3 planeNormal(plane.x, plane.y, plane.z);

	MwVector3::Normalize(planeNormal, planeNormal);

    return -RayToPerpendicularPlaneDistance(point, planeNormal, planePoint);
}
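Example 5 goes through an explicit point on the plane; the same result follows directly from the signed-distance form d = (n·p + w) / |n| for a plane n·x + w = 0. A minimal standalone sketch (hypothetical types, not the Mw math library):

// Sketch (hypothetical types, not the Mw library): signed distance from a
// point p to the plane a*x + b*y + c*z + w = 0.
#include <cmath>

struct Vec3 { float x, y, z; };
struct Plane4 { float x, y, z, w; };   // (a, b, c, w)

float pointToPlaneDistance(const Vec3& p, const Plane4& plane)
{
	float len = std::sqrt(plane.x*plane.x + plane.y*plane.y + plane.z*plane.z);
	return (plane.x*p.x + plane.y*p.y + plane.z*p.z + plane.w) / len;
}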
Example 6
void AutoTriangleMesh<PointType>::calcNormal(const typename AutoTriangleMesh<PointType>::Vertex* vPtr,float normal[3]) const
	{
	/* Set normal vector to zero: */
	for(int i=0;i<3;++i)
		normal[i]=0.0f;
	
	/* Iterate through vertex' platelet: */
	const Edge* ve=vPtr->getEdge();
	do
		{
		const Edge* ve2=ve->getFacePred()->getOpposite();
		float triangleNormal[3];
		planeNormal(*ve->getStart(),*ve->getEnd(),*ve2->getEnd(),triangleNormal);
		for(int i=0;i<3;++i)
			normal[i]+=triangleNormal[i];

		/* Go to next edge around vertex: */
		ve=ve2;
		}
	while(ve!=vPtr->getEdge());
	}
void ZBspTree::_GetSortedFaceList( D3DXPLANE* pPlane, ZBspFace* pFaces, ZBspFace** fr , ZBspFace** bk )
{
	ZBspFace * pFront = NULL, * pBack = NULL, * pNext = NULL;
	ZBspFace * pCurrentFace = NULL;
	if( !pFaces )
	{
		*fr = NULL;
		*bk = NULL;
		return;
	}

	for( pCurrentFace = pFaces ; pCurrentFace ; pCurrentFace = pFaces )
	{
		pFaces = pFaces->GetNext();
		D3DXVECTOR3 planeNormal( pPlane->a, pPlane->b, pPlane->c );
		D3DXVECTOR3 currentNormal( pCurrentFace->GetPlane()->a, pCurrentFace->GetPlane()->b, pCurrentFace->GetPlane()->c );
		float val;
		int res = ZClassifyByPlane::WhereIsFace( pPlane, pCurrentFace->GetVerts(), pCurrentFace->GetVertCount() );
		switch( res )
		{
			case ZClassifyByPlane::FRONT:
				pCurrentFace->AddNext( pFront );
				pFront = pCurrentFace;
				break;

			case ZClassifyByPlane::BACK:
				pCurrentFace->AddNext( pBack );
				pBack = pCurrentFace;
				break;

			case ZClassifyByPlane::ON:
				pCurrentFace->SetUsed( TRUE );

				val = D3DXVec3Dot( &planeNormal, &currentNormal );
				if( val >= 0.0f )
				{
					pCurrentFace->AddNext( pFront );
					pFront = pCurrentFace;
				}
				else
				{
					pCurrentFace->AddNext( pBack );
					pBack = pCurrentFace;
				}
				break;

			case ZClassifyByPlane::SPLIT:
				ZBspFace * front = NULL, * back = NULL;
				_Split( pPlane, pCurrentFace, &front, &back );
				if( !front && !back )
					break;
				if( front->GetNext() )
				{
					front->GetNext()->AddNext( pFront );
					pFront = front;
				}
				else 
				{
					front->AddNext( pFront );
					pFront = front;
				}
				if( back->GetNext() )
				{
					back->GetNext()->AddNext( pBack );
					pBack = back;
				}
				else
				{
					back->AddNext( pBack );
					pBack = back;
				}

				// The split face has been newly divided into front and back faces,
				// so it must be deleted from memory.
				pCurrentFace->AddNext( NULL );
				delete pCurrentFace;
				break;
		}
	}
	*fr = pFront;
	*bk = pBack;
}
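In the ON case the routine above uses the dot product between the splitter normal and the face normal to send coplanar faces to the matching side. The per-point test that a classifier such as ZClassifyByPlane::WhereIsFace builds on is just the sign of the plane equation; a hedged sketch using the D3DX types already present above (the epsilon and names are illustrative):

// Sketch (illustrative, not the ZClassifyByPlane API): classify a point
// against the plane a*x + b*y + c*z + d = 0 with a small epsilon band.
enum Side { SIDE_ON, SIDE_FRONT, SIDE_BACK };

Side classifyPoint(const D3DXPLANE* plane, const D3DXVECTOR3* p, float eps = 1e-4f)
{
	float dist = plane->a * p->x + plane->b * p->y + plane->c * p->z + plane->d;
	if (dist > eps)  return SIDE_FRONT;
	if (dist < -eps) return SIDE_BACK;
	return SIDE_ON;
}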
Example 8
void SpheresDemo::setupScene(const ConstructionInfo& ci)
{
	
	
	if (1)
	{
		btSphereShape* sphere = new btSphereShape(1);
			m_collisionShapes.push_back(sphere);

		/// Create Dynamic Objects
		btTransform startTransform;
		startTransform.setIdentity();

	

		float start_x = START_POS_X - ci.gapX*ci.arraySizeX/2;
		float start_y = START_POS_Y;
		float start_z = START_POS_Z - ci.gapZ*ci.arraySizeZ/2;

		for (int k=0;k<ci.arraySizeY;k++)
		{
			int sizeX = ci.arraySizeX;
			int startX = -sizeX/2;
			float gapX = ci.gapX;

			for (int i=0;i<sizeX;i++)
			{
				int sizeZ = ci.arraySizeZ;
				int startZ = -sizeX/2;
				float gapZ =ci.gapZ;
				for(int j = 0;j<sizeZ;j++)
				{
					//btCollisionShape* shape = k==0? boxShape : colShape;

					btCollisionShape* shape = sphere;

					
					btScalar	mass  = 1;
					if (!ci.m_useConcaveMesh && k==0)
						mass = k==0? 0.f : 1.f;

					//rigidbody is dynamic if and only if mass is non zero, otherwise static
					bool isDynamic = (mass != 0.f);

					btVector3 localInertia(0,0,0);
					if (isDynamic)
						shape->calculateLocalInertia(mass,localInertia);

					startTransform.setOrigin(SCALING*btVector3(
										btScalar(gapX*i + start_x),
										btScalar(ci.gapY*k + start_y),
										btScalar(gapZ*j + start_z)));

			
					//using motionstate is recommended, it provides interpolation capabilities, and only synchronizes 'active' objects
					btDefaultMotionState* myMotionState = new btDefaultMotionState(startTransform);
					btRigidBody::btRigidBodyConstructionInfo rbInfo(mass,myMotionState,shape,localInertia);
					btRigidBody* body = new btRigidBody(rbInfo);
					

					m_dynamicsWorld->addRigidBody(body);
				}
			}
		}
	}

	{
		btVector3 planeNormal(0,1,0);
		btScalar planeConstant=0;

		btCollisionShape* shape = new btStaticPlaneShape(planeNormal,planeConstant);
		//btBoxShape* plane = new btBoxShape(btVector3(100,1,100));
		//plane->initializePolyhedralFeatures();
		//btSphereShape* shape = new btSphereShape(1000);

		btScalar mass(0.);

		//rigidbody is dynamic if and only if mass is non zero, otherwise static
		bool isDynamic = (mass != 0.f);

		btVector3 localInertia(0,0,0);
		btTransform groundTransform;
		groundTransform.setIdentity();
		groundTransform.setRotation(btQuaternion(btVector3(1,0,0),0.3));
		groundTransform.setOrigin(btVector3(0,0,0));

		//using motionstate is recommended, it provides interpolation capabilities, and only synchronizes 'active' objects
		btDefaultMotionState* myMotionState = new btDefaultMotionState(groundTransform);
		btRigidBody::btRigidBodyConstructionInfo rbInfo(mass,myMotionState,shape,localInertia);
		btRigidBody* body = new btRigidBody(rbInfo);

		//add the body to the dynamics world
		m_dynamicsWorld->addRigidBody(body);
	}
}
Example 9
void VRMeshEditor::renderMesh(VRMeshEditor::DataItem* dataItem) const
{
    /* Check if vertex and triangle arrays are large enough: */
    unsigned int numVertices=mesh->getNextVertexIndex();
    if(dataItem->numVertices<numVertices)
    {
        delete[] dataItem->vertices;
        dataItem->numVertices=numVertices+(numVertices/2);
        dataItem->vertices=new MyVertex[dataItem->numVertices];
    }
    unsigned int numTriangles=mesh->getNumFaces();
    if(dataItem->numTriangles<numTriangles)
    {
        delete[] dataItem->triangles;
        dataItem->numTriangles=numTriangles+(numTriangles/2);
        dataItem->triangles=new unsigned int[dataItem->numTriangles*3];
    }

    /* Reset vertex array: */
    for(MyMesh::ConstVertexIterator vIt=mesh->beginVertices(); vIt!=mesh->endVertices(); ++vIt)
    {
        MyVertex* vPtr=&dataItem->vertices[vIt->index];
        for(int i=0; i<3; ++i)
        {
            vPtr->normal[i]=0.0f;
            vPtr->position[i]=(*vIt)[i];
        }
    }

    /* Traverse triangles once to calculate normal vectors for smooth shading: */
    unsigned int* viPtr=dataItem->triangles;
    for(MyMesh::ConstFaceIterator fIt=mesh->beginFaces(); fIt!=mesh->endFaces(); ++fIt,viPtr+=3)
    {
        /* Gather triangle's points: */
        const MyMesh::Vertex* v[3];
        const MyMesh::Edge* e=fIt->getEdge();
        for(int i=0; i<3; ++i)
        {
            v[i]=e->getStart();
            viPtr[i]=v[i]->index;
            e=e->getFaceSucc();
        }

        /* Calculate triangle's normal vector: */
        float normal[3];
        planeNormal(*v[0],*v[1],*v[2],normal);

        /* Distribute normal vector to triangle's vertices: */
        for(int i=0; i<3; ++i)
        {
            MyVertex* vPtr=&dataItem->vertices[viPtr[i]];
            for(int j=0; j<3; ++j)
                vPtr->normal[j]+=normal[j];
        }
    }

#if 0
    static bool saveVertices=true;
    if(saveVertices)
    {
#if 0
        float min[3],max[3];
        MyMesh::ConstVertexIterator vIt=mesh->beginVertices();
        for(int i=0; i<3; ++i)
            min[i]=max[i]=vIt->pos()[i];
        for(++vIt; vIt!=mesh->endVertices(); ++vIt)
            for(int i=0; i<3; ++i)
            {
                if(min[i]>vIt->pos()[i])
                    min[i]=vIt->pos()[i];
                else if(max[i]<vIt->pos()[i])
                    max[i]=vIt->pos()[i];
            }
        float center[3];
        for(int i=0; i<3; ++i)
            center[i]=(min[i]+max[i])*0.5f;
        float scale=60.0f/(max[0]-min[0]);
        for(int i=1; i<3; ++i)
        {
            float scale2=60.0f/(max[i]-min[i]);
            if(scale>scale2)
                scale=scale2;
        }
#endif

#if 0
        FILE* sampleFile=fopen("../ScatteredData/ModelVertices.txt","wt");
        fprintf(sampleFile,"%u 3 1\n",numVertices);
        for(MyMesh::ConstVertexIterator vIt=mesh->beginVertices(); vIt!=mesh->endVertices(); ++vIt)
        {
            MyVertex* vPtr=&dataItem->vertices[vIt->index];
            float pos[3];
            for(int i=0; i<3; ++i)
                pos[i]=vPtr->position[i];
            //pos[i]=(vPtr->position[i]-center[i])*scale+64.0f;
            float normLen=sqrtf(vPtr->normal[0]*vPtr->normal[0]+vPtr->normal[1]*vPtr->normal[1]+vPtr->normal[2]*vPtr->normal[2]);
            fprintf(sampleFile,"%8.3f %8.3f %8.3f 0 0 0 %6.3f %6.3f %6.3f\n",pos[0],pos[1],pos[2],vPtr->normal[0]/normLen,vPtr->normal[1]/normLen,vPtr->normal[2]/normLen);
        }
        fclose(sampleFile);
#endif

        FILE* triangleFile=fopen("../../Teaching/ECS175/Project2/SubdivisionModel.tris","wt");
        fprintf(triangleFile,"color %5.3f, %5.3f, %5.3f\n",0.5f,0.5f,0.5f);

        for(MyMesh::ConstFaceIterator fIt=mesh->beginFaces(); fIt!=mesh->endFaces(); ++fIt,viPtr+=3)
        {
            fprintf(triangleFile,"beginPolygon\n");

            /* Gather triangle's points: */
            const MyMesh::Vertex* v[3];
            const MyVertex* vs[3];
            const MyMesh::Edge* e=fIt->getEdge();
            for(int i=0; i<3; ++i)
            {
                v[i]=e->getStart();
                vs[i]=&dataItem->vertices[v[i]->index];
                e=e->getFaceSucc();
            }

            float normal[3];
            planeNormal(*v[0],*v[1],*v[2],normal);
            fprintf(triangleFile,"normal %6.3f, %6.3f, %6.3f\n",normal[0],normal[1],normal[2]);

            for(int i=0; i<3; ++i)
            {
                //fprintf(triangleFile,"normal %6.3f, %6.3f, %6.3f\n",vs[i]->normal[0],vs[i]->normal[1],vs[i]->normal[2]);
                fprintf(triangleFile,"vertex %8.3f, %8.3f, %8.3f\n",vs[i]->position[0],vs[i]->position[1],vs[i]->position[2]);
            }

            fprintf(triangleFile,"end\n");
        }

        fclose(triangleFile);

        saveVertices=false;
    }
#endif

    /* Traverse triangles again to render: */
    glEnableClientState(GL_NORMAL_ARRAY);
    glEnableClientState(GL_VERTEX_ARRAY);
    glVertexPointer(dataItem->vertices);
    glDrawElements(GL_TRIANGLES,numTriangles*3,GL_UNSIGNED_INT,dataItem->triangles);
    glDisableClientState(GL_NORMAL_ARRAY);
    glDisableClientState(GL_VERTEX_ARRAY);
}
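renderMesh accumulates unnormalized face normals into the shared vertices for smooth shading. A small hypothetical post-pass (assuming MyVertex::normal is a float[3], as the code above suggests) would normalize them explicitly before the draw call instead of relying on fixed-function normalization:

    /* Hypothetical addition inside renderMesh, before the draw call:
       normalize the accumulated vertex normals. */
    for(unsigned int vi=0; vi<numVertices; ++vi)
    {
        float* n=dataItem->vertices[vi].normal;
        float len=sqrtf(n[0]*n[0]+n[1]*n[1]+n[2]*n[2]);
        if(len>0.0f)
            for(int j=0; j<3; ++j)
                n[j]/=len;
    }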
Example 10
int  main()
{
    //Specify data directory
    //const std::string dataDirectory = "/Users/ervislilaj/Desktop/data/SimudPuteh";
    const std::string dataDirectory = "/Users/ervislilaj/Desktop/data/SmallCaveDownsampled";
    
    //Calculate paths
    const std::string outputDirectory = dataDirectory + "/output";
    
    const std::string offFile = dataDirectory + "/model.off";
    const std::string skeletonFile = dataDirectory + "/model.skel";
    
    const std::string segmentationFile = dataDirectory + "/segmentation.seg";
    const std::string segmentationFile2 = dataDirectory + "/segmentation2.seg";
    
    
    
    //Read files
    typedef std::vector<Triangle> TriangleList;
    std::vector<Eigen::Vector3f> vertices;
    std::vector<Eigen::Vector3f> newVertices /*(vertices.size()*2)*/;
    TriangleList triangles;
    std::vector<Eigen::Vector3f> border;
    std::vector<CurveSkeleton::Vertex> skeletonVertices;
    
    
    std::vector<Vertex> gang;
    std::vector<Eigen::Vector3f> borderVertices;
    std::vector<Eigen::Vector3f> intersectionVertexForTriangulation;
    
    std::vector<IndexedTriangle> triIndices;
    std::vector<IndexedTriangle> borderIndices;
    std::vector<IndexedTriangle> labelIndices;
    std::vector<IndexedTriangle> leerIndices;
    
    std::vector<Eigen::Vector3f> verwe;
    
    std::vector<int32_t> segmentation;
    std::vector<int32_t> segmentation2;
    std::vector<double> doubleSeg;
    std::vector<int> labels;
    
    
    //step & iteration parameter
    int iter = 490;
    double step = 0.001;

    std::cout << "Loading mesh file..." << std::endl;
    ReadOff(offFile, vertices, triangles, triIndices);
    
    std::cout << "Loading skeleton..." << std::endl;
    CurveSkeleton* skeleton = LoadCurveSkeleton(skeletonFile.c_str());
    
    std::cout << "Loading segmentation..." << std::endl;
    ReadSegmentation(segmentationFile2, segmentation2, skeleton);
    //copy
    
    std::vector<Vertex> vec2;
    
    std::cout << "Calculating inverse correspondences..." << std::endl;
    //Calculate inverse correspondences
    std::vector<unsigned int> meshVertexCorrespondsTo(vertices.size());
    int iVert = -1;
    for (auto& vert : skeleton->vertices)
    {
        ++iVert;
        for (auto c : vert.correspondingOriginalVertices)
        {
            meshVertexCorrespondsTo[c] = iVert;
        }
        
    }
    
    for (int i = 0; i < segmentation2.size(); ++i) {
        doubleSeg.push_back(segmentation2[i]);
    }
    
    for (int i = 0; i < vertices.size(); ++i) {
        Vertex new_v;
        new_v.p = vertices[i];
        new_v.c = doubleSeg[meshVertexCorrespondsTo[i]] + 0.5;
        newVertices.push_back(vertices[i]);
        vec2.push_back(new_v);
    }
    
    
    cgv::math::union_find ufTri((int)vertices.size());
    
    borderIndices.clear();
    labels.clear();
    labelIndices.clear();
    
    
    //set neighbors id
    for (int i = 0; i < triIndices.size(); ++i)
    {
        for (int j = 0; j < triIndices.size(); ++j)
        {
            if (i != j)
            {
                if (((triIndices[j].i[0] == triIndices[i].i[0]) && (triIndices[j].i[1] == triIndices[i].i[1])) ||
                    ((triIndices[j].i[1] == triIndices[i].i[0]) && (triIndices[j].i[0] == triIndices[i].i[1])) ||
                    
                    ((triIndices[j].i[1] == triIndices[i].i[0]) && (triIndices[j].i[2] == triIndices[i].i[1])) ||
                    ((triIndices[j].i[2] == triIndices[i].i[0]) && (triIndices[j].i[1] == triIndices[i].i[1])) ||
                    
                    ((triIndices[j].i[2] == triIndices[i].i[0]) && (triIndices[j].i[0] == triIndices[i].i[1])) ||
                    ((triIndices[j].i[0] == triIndices[i].i[0]) && (triIndices[j].i[2] == triIndices[i].i[1])))
                {
                    //  neighbor 1
                    triIndices[i].n[0] = &triIndices[j];
                }
                
                if  (((triIndices[j].i[0] == triIndices[i].i[1]) && (triIndices[j].i[1] == triIndices[i].i[2])) ||
                     ((triIndices[j].i[1] == triIndices[i].i[1]) && (triIndices[j].i[0] == triIndices[i].i[2])) ||
                     
                     ((triIndices[j].i[1] == triIndices[i].i[1]) && (triIndices[j].i[2] == triIndices[i].i[2])) ||
                     ((triIndices[j].i[2] == triIndices[i].i[1]) && (triIndices[j].i[1] == triIndices[i].i[2])) ||
                     
                     ((triIndices[j].i[2] == triIndices[i].i[1]) && (triIndices[j].i[0] == triIndices[i].i[2])) ||
                     ((triIndices[j].i[0] == triIndices[i].i[1]) && (triIndices[j].i[2] == triIndices[i].i[2])))
                {
                    //  neighbor 2
                    triIndices[i].n[1] = &triIndices[j];
                }
                
                if  (((triIndices[j].i[0] == triIndices[i].i[2]) && (triIndices[j].i[1] == triIndices[i].i[0])) ||
                     ((triIndices[j].i[1] == triIndices[i].i[2]) && (triIndices[j].i[0] == triIndices[i].i[0])) ||
                     
                     ((triIndices[j].i[1] == triIndices[i].i[2]) && (triIndices[j].i[2] == triIndices[i].i[0])) ||
                     ((triIndices[j].i[2] == triIndices[i].i[2]) && (triIndices[j].i[1] == triIndices[i].i[0])) ||
                     
                     ((triIndices[j].i[2] == triIndices[i].i[2]) && (triIndices[j].i[0] == triIndices[i].i[0])) ||
                     ((triIndices[j].i[0] == triIndices[i].i[2]) && (triIndices[j].i[2] == triIndices[i].i[0])))
                {
                    //  neighbor 3
                    triIndices[i].n[2] = &triIndices[j];
                }
            }
        }
    }
    
    /*
     std::vector<IndexedTriangle> toShow;
     for(int i = 0; i < triIndices.size(); i += 50)
     {
     toShow.push_back(triIndices[i]);
     toShow.push_back(*triIndices[i].n[0]);
     toShow.push_back(*triIndices[i].n[1]);
     toShow.push_back(*triIndices[i].n[2]);
     }
     */
    
    
    borderIndices.clear();
    labels.clear();
    labelIndices.clear();
    
    
    for (int i = 0; i < vec2.size(); ++i) // for each vertex
    {
        for (int j = 0; j < triIndices.size(); ++j) // for each triangle
        {
            if ((i == triIndices[j].i[0] || i == triIndices[j].i[1] || i == triIndices[j].i[2]))
            {
                vec2[i].NeigborTri.push_back(j);
            }
            
        }
    }
    
    
    int nrOptimization = 0;
    double tmpLengthIter = 0;
    double lengthIter = 0;
    bool firstTimeIter = false;
    
    do
    {
        std::ofstream fout("iter/50iter" + std::to_string(nrOptimization) + ".xyz", std::ofstream::out);
        
        if(!firstTimeIter)
            tmpLengthIter = 99999999;
        
        tmpLengthIter = lengthIter;
        lengthIter = 0;
        
        
        // direction 1
        for (int i = 0; i < vec2.size(); ++i) // for each vertex
        {
            double inzidenzEdgesLength = 0;
            double tmp = 0;
            int index = 0;
            do {
                
                Edge e;
                tmp = inzidenzEdgesLength;
                if (index == 0)
                    tmp = 10000;
                inzidenzEdgesLength = 0;
                
                for (int j = 0; j < vec2[i].NeigborTri.size(); ++j) // for each triangle
                {
                    
                    triIndices[vec2[i].NeigborTri[j]].visible = true;
                    if ((i == triIndices[vec2[i].NeigborTri[j]].i[0] || i == triIndices[vec2[i].NeigborTri[j]].i[1] || i == triIndices[vec2[i].NeigborTri[j]].i[2]))
                    {
                        // compute the edges and their lengths
                        if (ComputePointsBetweenEdges_v1(vec2, triIndices[vec2[i].NeigborTri[j]], e))
                        {
                            e.calculateLength();
                            inzidenzEdgesLength += e.length;
                        }
                    }
                    
                }
                
                index++;
                
                vec2[i].c += step;
                
                
                
                
            } while (tmp > inzidenzEdgesLength );
            
            
            vec2[i].c -= step;
            vec2[i].edgeLength = inzidenzEdgesLength;
        }
        
        // direction 2
        for (int i = 0; i < vec2.size(); ++i) // for each vertex
        {
            double inzidenzEdgesLength = 0;
            double tmp = 0;
            int index = 0;
            do {
                
                Edge e;
                tmp = inzidenzEdgesLength;
                if (index == 0)
                    tmp = vec2[i].edgeLength;
                inzidenzEdgesLength = 0;
                
                for (int j = 0; j < vec2[i].NeigborTri.size(); ++j) // for each triangle
                {
                    
                    triIndices[vec2[i].NeigborTri[j]].visible = true;
                    if ((i == triIndices[vec2[i].NeigborTri[j]].i[0] || i == triIndices[vec2[i].NeigborTri[j]].i[1] || i == triIndices[vec2[i].NeigborTri[j]].i[2]))
                    {
                        // compute the edges and their lengths
                        if (ComputePointsBetweenEdges_v1(vec2, triIndices[vec2[i].NeigborTri[j]], e))
                        {
                            e.calculateLength();
                            inzidenzEdgesLength += e.length;
                        }
                    }
                    
                }
                //edgeLengthBlock = inzidenzEdgesLength
                index++;
                
                vec2[i].c -= step;
                
                
            lengthIter += inzidenzEdgesLength;
            } while (tmp > inzidenzEdgesLength );
            
            vec2[i].c += step;
            
        }
        
        for (int j = 0; j < triIndices.size(); ++j) // for each triangle
        {
            Edge e2;
            if (ComputePointsBetweenEdges_v1(vec2, triIndices[j], e2)) {
                fout << e2.a.x() << " " << e2.a.y() << " " << e2.a.z() << std::endl;
                fout << e2.b.x() << " " << e2.b.y() << " " << e2.b.z() << std::endl;
            }
        }
        
        nrOptimization++;
        fout.close();
        std::cout << nrOptimization << std::endl;
        std::cout << lengthIter << std::endl;
        
        
    } while ( (nrOptimization < iter ) && ((tmpLengthIter >= lengthIter) || (nrOptimization < 450)) );
    
    /*End Optimization*/
    
    
    
    
    borderVertices.clear();
    for (int j = 0; j < triIndices.size(); ++j) // for each triangle
    {
        triIndices[j].iD = j;
        Edge e2;
        if (ComputePointsBetweenEdges_v1(vec2, triIndices[j], e2)) {
            borderVertices.push_back(e2.a);
            borderVertices.push_back(e2.b);
        }
    }
    
    // detect transition (boundary) triangles
    for (int i = 0; i < triIndices.size(); ++i) {
        
        
        auto v0 = vec2.begin() + (triIndices[i].i[0]);
        auto v1 = vec2.begin() + (triIndices[i].i[1]);
        auto v2 = vec2.begin() + (triIndices[i].i[2]);
        
        double first = v0[0].c;
        double second = v1[0].c;
        double third = v2[0].c;
        
        if ((first >= 0.f && second < 0.f) || (first < 0.f && second >= 0.f) || (third >= 0.f && second < 0.f) || (third < 0.f && second >= 0.f) || (third >= 0.f && first < 0.f) || (third < 0.f && first >= 0.f)) {
            
            IndexedTriangle t;
            
            t.i[0] = triIndices[i].i[0];
            t.i[1] = triIndices[i].i[1];
            t.i[2] = triIndices[i].i[2];
            t.iD = triIndices[i].iD;
            t.n[0] = triIndices[i].n[0];
            t.n[1] = triIndices[i].n[1];
            t.n[2] = triIndices[i].n[2];
            borderIndices.push_back(t);
        }
        
    }
    
    // unite shells
    for (int i = 0; i < borderIndices.size(); ++i)
        for (int j = 0; j < borderIndices.size(); ++j)
        {
            if ((borderIndices[i].i[0] == borderIndices[j].i[0]) || (borderIndices[i].i[0] == borderIndices[j].i[1]) || (borderIndices[i].i[0] == borderIndices[j].i[2]))
            {
                ufTri.unite(i, j);
            }
            
            if ((borderIndices[i].i[1] == borderIndices[j].i[0]) || (borderIndices[i].i[1] == borderIndices[j].i[1]) || (borderIndices[i].i[1] == borderIndices[j].i[2]))
            {
                ufTri.unite(i, j);
            }
            
            if ((borderIndices[i].i[2] == borderIndices[j].i[0]) || (borderIndices[i].i[2] == borderIndices[j].i[1]) || (borderIndices[i].i[2] == borderIndices[j].i[2]))
            {
                ufTri.unite(i, j);
            }
        }
    std::map<int, std::vector<IndexedTriangle>> labelmain;
    
    // store a single passage in labelIndices
    for (int i = 0; i < borderIndices.size(); ++i)
    {
        if (ufTri.num_in_set(ufTri.find(i)) > 1) {
            if (std::find(labels.begin(), labels.end(), ufTri.find(i)) == labels.end()) {
                labels.push_back(ufTri.find(i));
            }
        }
        
        if (labels.size() != 0)
        {
            int key = ufTri.find(i);
            
            labelmain[key].push_back(borderIndices[i]);
            
        }
    }
    std::vector<IndexedTriangle> newTriangles;
    
    
    
    int offset = (int)vec2.size();
    for (int label : labels)
        
    {
        labelIndices = labelmain[label];
        
        
        std::vector<Eigen::Vector3f> labelVertices;
        std::vector<Eigen::Vector3f> halleVertices;
        
        labelVertices.clear();
        Edge labelEdge;
        
        
        for (int i = 0; i < labelIndices.size(); ++i)
        {
            if (ComputePointsBetweenEdges_v1(vec2, labelIndices[i], labelEdge))
            {
                labelVertices.push_back(labelEdge.a);
                labelVertices.push_back(labelEdge.b);
            }
        }
        
        /*Plane Fitting with SVD */
        Eigen::Vector3f vor_c(0, 0, 0);
        Eigen::Vector3f c(0, 0, 0);
        Eigen::MatrixXf matA(3, labelVertices.size());
        
        // calculate c
        for (int i = 0; i < labelVertices.size(); ++i)
        {
            vor_c += labelVertices[i];
        }
        
        c = vor_c / labelVertices.size();
        
        //fill the Matrix
        for (int i = 0; i < labelVertices.size(); i++)
        {
            float xB, yB, zB;
            xB = labelVertices[i].x() - c.x();
            yB = labelVertices[i].y() - c.y();
            zB = labelVertices[i].z() - c.z();
            matA.col(i) << xB, yB, zB;
        }
        
        Eigen::JacobiSVD<Eigen::MatrixXf> svd(matA, Eigen::ComputeThinU | Eigen::ComputeThinV);
        Eigen::Vector3f planeNormal(0, 0, 0);
        planeNormal = svd.matrixU().col(2);
        float d = -(planeNormal.x()*c.x() + planeNormal.y()*c.y() + planeNormal.z()*c.z());
        
        /*Begin Normal direction of the plane*/
        std::vector<Eigen::Vector3f> halleVerticesSidePLane;
        std::vector<double> disthalleVerticesSidePLane;
        int indexHalleV;
       
        Eigen::Vector3f halle_cVector;
        
        
        // add all hall vertices
        for(int i = 0; i < labelIndices.size(); ++i)
        {
            if(vec2[labelIndices[i].i[0]].c >= 0)
                halleVerticesSidePLane.push_back(vec2[labelIndices[i].i[0]].p);
            
            if(vec2[labelIndices[i].i[1]].c >= 0)
                halleVerticesSidePLane.push_back(vec2[labelIndices[i].i[1]].p);
            
            if(vec2[labelIndices[i].i[2]].c >= 0)
                halleVerticesSidePLane.push_back(vec2[labelIndices[i].i[2]].p);
            
        }
        
        for(int i = 0; i < halleVerticesSidePLane.size(); ++i)
        {
            disthalleVerticesSidePLane.push_back( Dist2Plane(halleVerticesSidePLane[i], planeNormal, d));
        }
        
        // pick the index of the vertex farthest from the plane
        indexHalleV = (int)std::distance(disthalleVerticesSidePLane.begin(),
                                         std::max_element(disthalleVerticesSidePLane.begin(), disthalleVerticesSidePLane.end()));
        
        halle_cVector = halleVerticesSidePLane[indexHalleV] - c;
        
        Eigen::Vector3f diffBetweenPlane_halle_positive = halle_cVector + planeNormal;
        Eigen::Vector3f diffBetweenPlane_halle_negative = halle_cVector - planeNormal;
        
        float lengthPositive =sqrt(diffBetweenPlane_halle_positive.x()* diffBetweenPlane_halle_positive.x() + diffBetweenPlane_halle_positive.y()* diffBetweenPlane_halle_positive.y() + diffBetweenPlane_halle_positive.z()* diffBetweenPlane_halle_positive.z() );
        float lengthNegative =sqrt(diffBetweenPlane_halle_negative.x()* diffBetweenPlane_halle_negative.x() + diffBetweenPlane_halle_negative.y()* diffBetweenPlane_halle_negative.y() + diffBetweenPlane_halle_negative.z()* diffBetweenPlane_halle_negative.z() );
        
        if (lengthPositive > lengthNegative)
            planeNormal = -planeNormal;
        
        
        
        // ax + bx + cx + d = 0 Plane equation
        d = -(planeNormal.x()*c.x() + planeNormal.y()*c.y() + planeNormal.z()*c.z());
        

        
        /*Begin computing Intersection Points*/
        
        
        
        
        //Point which has the largest distance to c
        std::vector<double> MaxDistanceToC;
        double radiusMax;
        
        for (int i = 0; i < labelVertices.size(); ++i)
        {
            MaxDistanceToC.push_back(Dist2Points(c, labelVertices[i]));
        }
        
        radiusMax = *std::max_element(MaxDistanceToC.begin(), MaxDistanceToC.end());
        
        //Computing Intersection points
        
        int succesor = offset - 1;
        
        std::vector<Edge> intersectionEdges;
        intersectionEdges.clear();
        
   
        // add label neighbors (8 steps, 3^12)

        
        // add the first triangle that is intersected by the plane and belongs to labelIndices
        IndexedTriangle firstTri;
        int firstTriID = 0;
        bool firstTimeSearch = true;
        
        for (int i = 0; i < labelIndices.size(); ++i){
            Eigen::Vector3f aP, bP, cP, intersection1, intersection2;
         
            int aI = 0, bI = 0, cI = 0;
           
            aI = labelIndices[i].i[0];
            bI = labelIndices[i].i[1];
            cI = labelIndices[i].i[2];
            
            aP = vec2[aI].p;
            bP = vec2[bI].p;
            cP = vec2[cI].p;
            
            if((SegPlaneIntersection(aP, bP, intersection1, planeNormal, d) || SegPlaneIntersection(aP, cP, intersection2, planeNormal, d) ||
                SegPlaneIntersection(bP, cP, intersection2, planeNormal, d)) && firstTimeSearch)
            {
                firstTri = triIndices[labelIndices[i].iD];
                firstTimeSearch = false;
                firstTriID = labelIndices[i].iD;
            }
                
                
        }
        
        std::set<int> triIndicesHit ;
        int mico = 0;
        do{
            for (int m = 0; m < 3 ; ++m)
            {
                Eigen::Vector3f aP, bP, cP, intersection1, intersection2;
                IndexedTriangle t1,t2;
                //indices
               int aI = firstTri.n[m]->i[0];
               int bI = firstTri.n[m]->i[1];
               int cI = firstTri.n[m]->i[2];
                
                
                aP = vec2[aI].p;
                bP = vec2[bI].p;
                cP = vec2[cI].p;
                
                
            if((SegPlaneIntersection(aP, bP, intersection1, planeNormal, d) || SegPlaneIntersection(aP, cP, intersection2, planeNormal, d) ||
                SegPlaneIntersection(bP, cP, intersection2, planeNormal, d)) && (triIndicesHit.find(firstTri.n[m]->iD) == triIndicesHit.end())){
                
                triIndicesHit.insert(firstTri.n[m]->iD);
               
                if (SegPlaneIntersection(aP, bP, intersection1, planeNormal, d) && SegPlaneIntersection(aP, cP, intersection2, planeNormal, d) && (Dist2Plane(aP, planeNormal, d) < 0))
                {
                    
                    newVertices.push_back(intersection1);
                    newVertices.push_back(intersection2);
                    
                    t1.i[0] = aI;
                    t1.i[2] = succesor + 2;
                    t1.i[1] = succesor + 1;
                    
                     t1.visible = true;
                    
                    newTriangles.push_back(t1);
             
                    firstTri = *firstTri.n[m];
                    
                    succesor++;
                    succesor++;
                }
                if (SegPlaneIntersection(bP, cP, intersection1, planeNormal, d) && SegPlaneIntersection(bP, aP, intersection2, planeNormal, d) && (Dist2Plane(bP, planeNormal, d) < 0))
                {
                    newVertices.push_back(intersection1);
                    newVertices.push_back(intersection2);
                    
                    t1.i[0] = bI;
                    t1.i[2] = succesor + 2;
                    t1.i[1] = succesor + 1;
                    
                     t1.visible = true;
                    
                    newTriangles.push_back(t1);
                    
            
                    firstTri = *firstTri.n[m];
                    succesor++;
                    succesor++;
                }
                if (SegPlaneIntersection(cP, aP, intersection1, planeNormal, d) && SegPlaneIntersection(cP, bP, intersection2, planeNormal, d) && (Dist2Plane(cP, planeNormal, d) < 0))
                {
                    newVertices.push_back(intersection1);
                    newVertices.push_back(intersection2);
                    
                    t1.i[0] = cI;
                    t1.i[2] = succesor + 2;
                    t1.i[1] = succesor + 1;
                   
                    t1.visible = true;
                   
                    newTriangles.push_back(t1);
                 
                    firstTri = *firstTri.n[m];
                    
                    succesor++;
                    succesor++;
                    
                }
                
                
                if (SegPlaneIntersection(aP, bP, intersection1, planeNormal, d) && SegPlaneIntersection(aP, cP, intersection2, planeNormal, d) && (Dist2Plane(bP, planeNormal, d) < 0) && (Dist2Plane(cP, planeNormal, d) < 0))
                {
                    newVertices.push_back(intersection1);
                    newVertices.push_back(intersection2);
                    
                    t1.i[0] = bI;
                    t1.i[2] = succesor + 1;
                    t1.i[1] = succesor + 2;
                    
                    t2.i[0] = cI;
                    t2.i[2] = bI;
                    t2.i[1] = succesor + 2;
                    t1.visible = true;
                    t2.visible = true;
                    
                    newTriangles.push_back(t1);
                    newTriangles.push_back(t2);
                    
                    firstTri = *firstTri.n[m];
                    
                    succesor++;
                    succesor++;
                }
                if (SegPlaneIntersection(bP, cP, intersection1, planeNormal, d) && SegPlaneIntersection(bP, aP, intersection2, planeNormal, d) && (Dist2Plane(cP, planeNormal, d) < 0) && (Dist2Plane(aP, planeNormal, d) < 0))
                {
                    newVertices.push_back(intersection1);
                    newVertices.push_back(intersection2);
                    
                    t1.i[0] = cI;
                    t1.i[2] = succesor + 1;
                    t1.i[1] = succesor + 2;
                    
                    t2.i[0] = aI;
                    t2.i[2] = cI;
                    t2.i[1] = succesor + 2;
                    
                    t1.visible = true;
                    t2.visible = true;
                    
                    newTriangles.push_back(t1);
                    newTriangles.push_back(t2);
        
                    firstTri = *firstTri.n[m];
                    
                    succesor++;
                    succesor++;
                }
                if (SegPlaneIntersection(cP, aP, intersection1, planeNormal, d) && SegPlaneIntersection(cP, bP, intersection2, planeNormal, d) && (Dist2Plane(aP, planeNormal, d) < 0) && (Dist2Plane(bP, planeNormal, d) < 0))
                {
                    newVertices.push_back(intersection1);
                    newVertices.push_back(intersection2);
                    
                    t1.i[0] = aI;
                    t1.i[2] = succesor + 1;
                    t1.i[1] = succesor + 2;
                    
                    t2.i[0] = bI;
                    t2.i[2] = aI;
                    t2.i[1] = succesor + 2;
                    
                    t1.visible = true;
                    t2.visible = true;
                    
                    newTriangles.push_back(t1);
                    newTriangles.push_back(t2);
                    
                    firstTri = *firstTri.n[m];
                    
                    succesor++;
                    succesor++;
                }
                
            }
                
        }
            
            mico ++;
            
        }while (mico < 1000000);
        
        
        // add the neighbors of the cut
        std::set<int> extendedLabelIndices ;
        for (int i = 0; i < triIndices.size(); ++i)
        {
            const bool is_in = triIndicesHit.find(triIndices[i].iD) != triIndicesHit.end();
            
            if(is_in){
            extendedLabelIndices.insert(triIndices[i].iD);
                for(int j = 0; j < 3; ++j){
                    extendedLabelIndices.insert(triIndices[i].n[j]->iD);
                    for(int k = 0; k < 3; ++k){
                        extendedLabelIndices.insert(triIndices[i].n[j]->n[k]->iD);
                        for(int l = 0; l < 3; ++l){
                            extendedLabelIndices.insert(triIndices[i].n[j]->n[k]->n[l]->iD);
                            for(int f = 0; f < 3; ++f){
                                extendedLabelIndices.insert(triIndices[i].n[j]->n[k]->n[l]->n[f]->iD);
                                for(int s = 0; s < 3; ++s){
                                    extendedLabelIndices.insert(triIndices[i].n[j]->n[k]->n[l]->n[f]->n[s]->iD);
                                    for(int r = 0; r < 3; ++r){
                                        extendedLabelIndices.insert(triIndices[i].n[j]->n[k]->n[l]->n[f]->n[s]->n[r]->iD);
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
        
     /*   for (int i = 0; i < triIndices.size(); ++i)
        {
            int a = triIndices[i].i[0],b = triIndices[i].i[1] ,c = triIndices[i].i[2] ;
            if(( vec2[a].c >= 0 )&& ( vec2[b].c >= 0 ) && ( vec2[c].c >= 0 ))
            {
                triIndices[i].visible = true;
            }
            
        }*/
        
        
        for (int i = 0; i < triIndices.size(); ++i)
        {
            //positions
            Eigen::Vector3f aP, bP, cP, intersection1, intersection2;
            //distance to middlepoint
            float dista, distb, distc;
            //indices
            int aI = 0, bI = 0, cI = 0;
            
            
            aI = triIndices[i].i[0];
            bI = triIndices[i].i[1];
            cI = triIndices[i].i[2];
            
            aP = vec2[aI].p;
            bP = vec2[bI].p;
            cP = vec2[cI].p;
            
            dista = Dist2Points(c, aP);
            distb = Dist2Points(c, bP);
            distc = Dist2Points(c, cP);
            
            const bool is_in = extendedLabelIndices.find(triIndices[i].iD) != extendedLabelIndices.end();
            
            
            
            if (is_in)
            {
                if ((Dist2Plane(aP, planeNormal, d) > 0) || (Dist2Plane(bP, planeNormal, d) > 0) || (Dist2Plane(cP, planeNormal, d) > 0))
                {
                    triIndices[i].visible = false;
                }
                if (((Dist2Plane(aP, planeNormal, d) <= 0) || (Dist2Plane(bP, planeNormal, d) <= 0) || (Dist2Plane(cP, planeNormal, d) <= 0)) && ((Dist2Plane(aP, planeNormal, d) > 10) || (Dist2Plane(bP, planeNormal, d) > 10) || (Dist2Plane(cP, planeNormal, d) > 10)))
                {
                    triIndices[i].visible = true;
                }
            }
            
        }
        
        
        
        
        
        //umbrella
        Eigen::Vector3f vor_ca(0,0,0), ca(0, 0, 0);
        int anz = (int)newVertices.size() - (offset);
        for (int j =  offset ; j < (int)newVertices.size() ; ++j)
        {
            vor_ca += newVertices[j];
        }
        
        ca = vor_ca / anz;
        newVertices.push_back(c);
        
        for (int j =  offset; j < newVertices.size()-2; j+=2)
        {
            IndexedTriangle t;
            t.i[1] = j;
            t.i[2] = j + 1;
            t.i[0] = (int)newVertices.size() - 1;
            newTriangles.push_back(t);
        }
        offset = (int)newVertices.size();
        /*End computing Intersection Points*/
    }
    for(auto t : triIndices)
        if(t.visible && (vec2[t.i[0]].c >= 0 || vec2[t.i[1]].c >= 0 || vec2[t.i[2]].c >= 0))
            newTriangles.push_back(t);
    
    
    
    
    
    
    WriteSegmentation(segmentationFile, segmentation2);
    ReadSegmentation(segmentationFile, segmentation, skeleton);
    
    
    //Test output
    std::cout << "Writing output file..." << std::endl;
    /*int colors[10][3] =
     {
     { 166, 206, 227 },
     { 31, 120, 180 },
     { 251, 154, 153 },
     { 227, 26, 28 },
     { 253, 191, 111 },
     { 255, 127, 0 },
     { 202, 178, 214 },
     { 106, 61, 154 },
     { 255, 255, 153 },
     { 177, 89, 40 }
     };*/
    auto colorFunc = [&](int i, int& r, int& g, int& b)
    {
        
        
        if (doubleSeg[i] == -1)
        {
            //color for passages
            r = 0; g = 175; b = 0;
        }
        else
            if (doubleSeg[i] >= 0)
            {
                r = 166; g = 175; b = 227;
            }
            else
                if (doubleSeg[i] == 0)
                {
                    r = 0; g = 0; b = 175;
                }
        
        
    };
    
    
    
    std::string segmentedMeshFile = outputDirectory + "/segmentedMesh.off";
    WriteOff(segmentedMeshFile.c_str(), vertices, triIndices, [&](int i, int& r, int& g, int& b) {colorFunc(meshVertexCorrespondsTo[i], r, g, b); });
    
    
    std::string segmentedMeshFile2 = outputDirectory + "/borderVertices.off";
    WriteOff(segmentedMeshFile2.c_str(), vertices, borderIndices, [&](int i, int& r, int& g, int& b) {colorFunc(meshVertexCorrespondsTo[i], r, g, b); });
    
    
    std::string segmentedMeshFile3 = outputDirectory + "/schalle.off";
    WriteOff(segmentedMeshFile3.c_str(), vertices, borderIndices, [&](int i, int& r, int& g, int& b) {colorFunc(meshVertexCorrespondsTo[i], r, g, b); });
    
    
    std::string segmentedMeshFile7 = outputDirectory + "/marchingBig.off";
    WriteOff(segmentedMeshFile7.c_str(), verwe , leerIndices, [&](int i, int& r, int& g, int& b) { r = 175; g = 0; b = 0; });
    
    std::string segmentedMeshFile8 = outputDirectory + "/new.off";
    WriteOff(segmentedMeshFile8.c_str(), newVertices, newTriangles, [&](int i, int& r, int& g, int& b) { r = 200; g = 200; b = 0; });
    
    //system("PAUSE");
}
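The per-label block above fits a plane to the border vertices by centering them and taking the left singular vector with the smallest singular value. A self-contained Eigen sketch of that technique (illustrative, independent of the cave-segmentation code above):

// Sketch (illustrative): least-squares plane fit through a point cloud via
// SVD; returns the unit normal and writes d so that n.dot(x) + d = 0 for
// points x on the plane. Assumes at least three non-collinear points.
#include <Eigen/Dense>
#include <vector>

Eigen::Vector3f fitPlane(const std::vector<Eigen::Vector3f>& pts, float& d)
{
    Eigen::Vector3f centroid = Eigen::Vector3f::Zero();
    for (const auto& p : pts) centroid += p;
    centroid /= static_cast<float>(pts.size());

    Eigen::MatrixXf A(3, pts.size());
    for (size_t i = 0; i < pts.size(); ++i)
        A.col(i) = pts[i] - centroid;

    Eigen::JacobiSVD<Eigen::MatrixXf> svd(A, Eigen::ComputeThinU);
    Eigen::Vector3f normal = svd.matrixU().col(2);   // direction of least variance
    d = -normal.dot(centroid);
    return normal;
}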
void testConvexHullProjectionWithGravity()
{
    // For this test, we will use a simple convex hull of side 1 meter
    // centered in the origin of the world and laying on the XY plane
    iDynTree::ConvexHullProjectionConstraint projectionConstraint;
    iDynTree::Polygon convexHull;
    convexHull.m_vertices.push_back(iDynTree::Position( 0.5, 0.5, 0.0));
    convexHull.m_vertices.push_back(iDynTree::Position(-0.5, 0.5, 0.0));
    convexHull.m_vertices.push_back(iDynTree::Position(-0.5, -0.5, 0.0));
    convexHull.m_vertices.push_back(iDynTree::Position( 0.5, -0.5, 0.0));
    std::vector<iDynTree::Polygon> polygons;
    polygons.resize(1);
    polygons[0] = convexHull;


    // The convex hull is already expressed in the world
    std::vector<iDynTree::Transform> transforms;
    transforms.push_back(iDynTree::Transform::Identity());

    iDynTree::Direction xAxis(1.0, 0.0, 0.0);
    iDynTree::Direction yAxis(0.0, 1.0, 0.0);

    bool ok = projectionConstraint.buildConvexHull(xAxis,yAxis,iDynTree::Position::Zero(),polygons,transforms);
    ASSERT_IS_TRUE(ok);

    // Use a test position that is outside the convex hull with the normal projection
    iDynTree::Position testPoint(1.0, 0.0, 1.0);

    // Projected along the normal of the plane, this point is outside the convex hull
    // Note: positive margin means inside, negative outside
    ASSERT_IS_TRUE(projectionConstraint.computeMargin(projectionConstraint.project(testPoint)) < 0);

    //----------------------------------------------------------------------------------------------
    // If the direction along which we are projecting is the normal of the plane, the projection
    // should match the one done without direction
    iDynTree::Direction planeNormal(0.0, 0.0, -1.0);
    projectionConstraint.setProjectionAlongDirection(planeNormal);
    ASSERT_EQUAL_VECTOR(projectionConstraint.project(testPoint), projectionConstraint.projectAlongDirection(testPoint));

    //----------------------------------------------------------------------------------------------
    // If the direction of the projection is "skewed" to the left, the point should instead be inside the convex hull
    iDynTree::Direction skewedDirection(-1.0, 0.0, -1.0);

    projectionConstraint.setProjectionAlongDirection(skewedDirection);
    std::cerr << projectionConstraint.computeMargin(projectionConstraint.projectAlongDirection(testPoint)) << std::endl;

    ASSERT_IS_TRUE(projectionConstraint.computeMargin(projectionConstraint.projectAlongDirection(testPoint)) > 0);

    //----------------------------------------------------------------------------------------------
    // The code should work even if I try to project in the opposite direction wrt 'planeNormal'
    iDynTree::Direction upwardDirection(0.0, 0.0, 1.0);

    projectionConstraint.setProjectionAlongDirection(upwardDirection);

    // I should obtain the same result as the orthogonal projection
    ASSERT_EQUAL_VECTOR(projectionConstraint.project(testPoint), projectionConstraint.projectAlongDirection(testPoint));
    
    // The projected point should be again outside the convex hull
    ASSERT_IS_TRUE(projectionConstraint.computeMargin(projectionConstraint.projectAlongDirection(testPoint))< 0);

}
Example 12
void CCampathDrawer::OnPostRenderAllTools()
{
	// Actually we are often called twice per frame due to an engine bug(?), once after the 3D skybox
	// and once after the world is drawn; we may even be called more times,
	// but we cannot worry about that for now.
	
	if(!m_Draw)
		return;

	if(!m_VertexShader)
	{
		m_VertexShader = g_AfxShaders.GetVertexShader("afx_line_vs20.fxo");
	}
	IDirect3DVertexShader9 * vertexShader = m_VertexShader->GetVertexShader();

	if(!m_PixelShader)
	{
		m_PixelShader = g_AfxShaders.GetPixelShader("afx_line_ps20.fxo");
	}
	IDirect3DPixelShader9 * pixelShader = m_PixelShader->GetPixelShader();

	if(!(m_Device && vertexShader && m_PixelShader && g_VEngineClient))
	{
		static bool firstError = true;

		if(firstError)
		{
			firstError = false;
			Tier0_Msg(
				"AFXERROR: CCampathDrawer::OnEndScene: Missing required dependencies:%s%s%s%s.\n",
				!m_Device ? " m_Device" : "",
				!vertexShader ? " vertexShader" : "",
				!pixelShader ? " pixelShader" : "",
				!g_VEngineClient ? " g_VEngineClient" : ""
			);
		}

		return;
	}

	// Save device state:

	IDirect3DPixelShader9 * oldPixelShader = 0;
	m_Device->GetPixelShader(&oldPixelShader);
	if(oldPixelShader) oldPixelShader->AddRef();

	IDirect3DVertexShader9 * oldVertexShader = 0;
	m_Device->GetVertexShader(&oldVertexShader);
	if(oldVertexShader) oldVertexShader->AddRef();

	IDirect3DVertexBuffer9 * oldVertexBuffer = 0;
	UINT oldVertexBufferOffset;
	UINT oldVertexBufferStride;
	m_Device->GetStreamSource(0, &oldVertexBuffer, &oldVertexBufferOffset, &oldVertexBufferStride);
	// this is done already according to doc: // if(oldVertexBuffer) oldVertexBuffer->AddRef();

	IDirect3DIndexBuffer9 * oldIndexBuffer = 0;
	m_Device->GetIndices(&oldIndexBuffer);
	// this is done already according to doc: // if(oldIndexBuffer) oldIndexBuffer->AddRef();

	IDirect3DVertexDeclaration9 * oldDeclaration;
	m_Device->GetVertexDeclaration(&oldDeclaration);
	if(oldDeclaration) oldDeclaration->AddRef();

	DWORD oldFVF;
	m_Device->GetFVF(&oldFVF);

	FLOAT oldCViewProj[4][4];
	m_Device->GetVertexShaderConstantF(8, oldCViewProj[0], 4);

	FLOAT oldCScreenInfo[4];
	m_Device->GetVertexShaderConstantF(48, oldCScreenInfo, 1);

	FLOAT oldCPlane0[4];
	m_Device->GetVertexShaderConstantF(49, oldCPlane0, 1);

	FLOAT oldCPlaneN[4];
	m_Device->GetVertexShaderConstantF(50, oldCPlaneN, 1);

	DWORD oldSrgbWriteEnable;
	m_Device->GetRenderState(D3DRS_SRGBWRITEENABLE, &oldSrgbWriteEnable);

	DWORD oldColorWriteEnable;
	m_Device->GetRenderState(D3DRS_COLORWRITEENABLE, &oldColorWriteEnable);

	DWORD oldZEnable;
	m_Device->GetRenderState(D3DRS_ZENABLE, &oldZEnable);

	DWORD oldZWriteEnable;
	m_Device->GetRenderState(D3DRS_ZWRITEENABLE, &oldZWriteEnable);
	
	DWORD oldZFunc;
	m_Device->GetRenderState(D3DRS_ZFUNC, &oldZFunc);

	DWORD oldAlphaTestEnable;
	m_Device->GetRenderState(D3DRS_ALPHATESTENABLE, &oldAlphaTestEnable);

	DWORD oldSeparateAlphaBlendEnable;
	m_Device->GetRenderState(D3DRS_SEPARATEALPHABLENDENABLE, &oldSeparateAlphaBlendEnable);

	DWORD oldAlphaBlendEnable;
	m_Device->GetRenderState(D3DRS_ALPHABLENDENABLE, &oldAlphaBlendEnable);

	DWORD oldBlendOp;
	m_Device->GetRenderState(D3DRS_BLENDOP, &oldBlendOp);

	DWORD oldSrcBlend;
	m_Device->GetRenderState(D3DRS_SRCBLEND, &oldSrcBlend);

	DWORD oldDestBlend;
	m_Device->GetRenderState(D3DRS_DESTBLEND, &oldDestBlend);

	DWORD oldCullMode;
	m_Device->GetRenderState(D3DRS_CULLMODE, &oldCullMode);

	// Draw:
	{
		//Vector3 vvForward, vvUp, vvRight, vvPos;

		double curTime = g_Hook_VClient_RenderView.GetCurTime();
		bool inCampath = 1 <= g_Hook_VClient_RenderView.m_CamPath.GetSize()
			&&	g_Hook_VClient_RenderView.m_CamPath.GetLowerBound() <= curTime
			&& curTime <= g_Hook_VClient_RenderView.m_CamPath.GetUpperBound();
		bool campathCanEval = g_Hook_VClient_RenderView.m_CamPath.CanEval();
		bool campathEnabled = g_Hook_VClient_RenderView.m_CamPath.Enabled_get();
		bool cameraMightBeSelected = false;

		m_Device->SetRenderState(D3DRS_SRGBWRITEENABLE, FALSE);
		m_Device->SetRenderState(D3DRS_COLORWRITEENABLE, D3DCOLORWRITEENABLE_ALPHA|D3DCOLORWRITEENABLE_BLUE|D3DCOLORWRITEENABLE_GREEN|D3DCOLORWRITEENABLE_RED);
		m_Device->SetRenderState(D3DRS_ZENABLE, D3DZB_TRUE);
		m_Device->SetRenderState(D3DRS_ZWRITEENABLE, FALSE);
		m_Device->SetRenderState(D3DRS_ZFUNC, D3DCMP_LESSEQUAL);
		m_Device->SetRenderState(D3DRS_ALPHATESTENABLE, FALSE);
		m_Device->SetRenderState(D3DRS_SEPARATEALPHABLENDENABLE, FALSE);
		m_Device->SetRenderState(D3DRS_ALPHABLENDENABLE, TRUE);
		m_Device->SetRenderState(D3DRS_BLENDOP, D3DBLENDOP_ADD);
		m_Device->SetRenderState(D3DRS_SRCBLEND, D3DBLEND_SRCALPHA);
		m_Device->SetRenderState(D3DRS_DESTBLEND, D3DBLEND_INVSRCALPHA);
		m_Device->SetRenderState(D3DRS_CULLMODE, D3DCULL_CCW);

		m_Device->SetVertexShader(vertexShader);

		m_WorldToScreenMatrix = g_VEngineClient->WorldToScreenMatrix();
			
		m_Device->SetVertexShaderConstantF(8, m_WorldToScreenMatrix.m[0], 4);

		// Provide view plane info for line clipping:
		{
			double plane0[4]={0,0,0,1};
			double planeN[4]={1,0,0,1};
			//double planeR[4]={0,-1,0,1};
			//double planeU[4]={0,0,1,1};

			unsigned char P[4];
			unsigned char Q[4];

			double L[4][4];
			double U[4][4];

			double M[4][4] = {
				m_WorldToScreenMatrix.m[0][0], m_WorldToScreenMatrix.m[0][1], m_WorldToScreenMatrix.m[0][2], 0,
				m_WorldToScreenMatrix.m[1][0], m_WorldToScreenMatrix.m[1][1], m_WorldToScreenMatrix.m[1][2], 0,
				m_WorldToScreenMatrix.m[2][0], m_WorldToScreenMatrix.m[2][1], m_WorldToScreenMatrix.m[2][2], 0,
				m_WorldToScreenMatrix.m[3][0], m_WorldToScreenMatrix.m[3][1], m_WorldToScreenMatrix.m[3][2], -1,
			};

			double b0[4] = {
				0 -m_WorldToScreenMatrix.m[0][3],
				0 -m_WorldToScreenMatrix.m[1][3],
				0 -m_WorldToScreenMatrix.m[2][3],
				-m_WorldToScreenMatrix.m[3][3],
			};

			double bN[4] = {
				0 -m_WorldToScreenMatrix.m[0][3],
				0 -m_WorldToScreenMatrix.m[1][3],
				1 -m_WorldToScreenMatrix.m[2][3],
				-m_WorldToScreenMatrix.m[3][3],
			};
			/*
			double bR[4] = {
				1 -m_WorldToScreenMatrix.m[0][3],
				0 -m_WorldToScreenMatrix.m[1][3],
				0 -m_WorldToScreenMatrix.m[2][3],
				-m_WorldToScreenMatrix.m[3][3],
			};

			double bU[4] = {
				0 -m_WorldToScreenMatrix.m[0][3],
				1 -m_WorldToScreenMatrix.m[1][3],
				0 -m_WorldToScreenMatrix.m[2][3],
				-m_WorldToScreenMatrix.m[3][3],
			};
			*/
			if(!LUdecomposition(M, P, Q, L, U))
			{
				Tier0_Warning("AFXERROR in CCampathDrawer::OnPostRenderAllTools: LUdecomposition failed\n");
			}
			else
			{
				SolveWithLU(L, U, P, Q, b0, plane0);
				SolveWithLU(L, U, P, Q, bN, planeN);
				
				//SolveWithLU(L, U, P, Q, bR, planeR);
				//SolveWithLU(L, U, P, Q, bU, planeU);
			}

			/*
			vvPos = Vector3(plane0[0], plane0[1], plane0[2]);
			vvForward = Vector3(planeN[0] -vvPos.X, planeN[1] -vvPos.Y, planeN[2]-vvPos.Z);
			vvForward.Normalize();
			vvRight = Vector3(planeR[0] -vvPos.X, planeR[1] -vvPos.Y, planeR[2]-vvPos.Z);
			vvRight.Normalize();
			vvUp = Vector3(planeU[0] -vvPos.X, planeU[1] -vvPos.Y, planeU[2]-vvPos.Z);
			vvUp.Normalize();
			*/

			/*
			Tier0_Msg("CCampathDrawer::OnPostRenderAllTools: curTime = %f\n",curTime);
			Tier0_Msg("M[0]=%f %f %f %f\nM[1]=%f %f %f %f\nM[2]=%f %f %f %f\nM[3]=%f %f %f %f\n", M[0][0],M[0][1],M[0][2],M[0][3], M[1][0],M[1][1],M[1][2],M[1][3], M[2][0],M[2][1],M[2][2],M[2][3], M[3][0],M[3][1],M[3][2],M[3][3]);
			Tier0_Msg("b0[0]=%f %f %f %f\n", b0[0], b0[1], b0[2], b0[3]);
			Tier0_Msg("bN[0]=%f %f %f %f\n", bN[0], bN[1], bN[2], bN[3]);
			Tier0_Msg("plane0=%f %f %f %f\n", plane0[0], plane0[1], plane0[2], plane0[3]);
			Tier0_Msg("planeN=%f %f %f %f\n", planeN[0], planeN[1], planeN[2], planeN[3]);
			*/

			FLOAT vPlane0[4] = {(float)plane0[0], (float)plane0[1], (float)plane0[2], 0.0f};

			Vector3 planeNormal(planeN[0] -plane0[0], planeN[1] -plane0[1], planeN[2] -plane0[2]);
			planeNormal.Normalize();

			FLOAT vPlaneN[4] = {(float)planeNormal.X, (float)planeNormal.Y, (float)planeNormal.Z, 0.0f};

			m_Device->SetVertexShaderConstantF(49, vPlane0, 1);
			m_Device->SetVertexShaderConstantF(50, vPlaneN, 1);
		}

		m_Device->SetPixelShader(pixelShader);

		m_Device->SetFVF(CCampathDrawer_VertexFVF);

		int screenWidth, screenHeight;
		g_VEngineClient->GetScreenSize(screenWidth, screenHeight);
		FLOAT newCScreenInfo[4] = { 0 != screenWidth ? 1.0f / screenWidth : 0.0f, 0 != screenHeight ? 1.0f / screenHeight : 0.0f, 0.0, 0.0f};
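		// Vertex shader constant c48 is assumed to hold (1/screenWidth, 1/screenHeight, linePixelWidth, unused);
		// the z component is filled with the desired pixel width before each draw below.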

		// Draw trajectory:
		if(2 <= g_Hook_VClient_RenderView.m_CamPath.GetSize() && campathCanEval)
		{
			if(m_RebuildDrawing)
			{
				// Rebuild trajectory points.
				// This operation can be quite expensive (up to O(N^2)),
				// so it should only be done when something
				// changed (which is what we do here).

				m_TrajectoryPoints.clear();
				
				CamPathIterator last = g_Hook_VClient_RenderView.m_CamPath.GetBegin();				
				CamPathIterator it = last;

				TempPoint * pts = new TempPoint[c_CameraTrajectoryMaxPointsPerInterval];

				for(++it; it != g_Hook_VClient_RenderView.m_CamPath.GetEnd(); ++it)
				{
					double delta = it.GetTime() -last.GetTime();

					for(size_t i = 0; i<c_CameraTrajectoryMaxPointsPerInterval; i++)
					{
						double t = last.GetTime() + delta*((double)i/(c_CameraTrajectoryMaxPointsPerInterval-1));

						CamPathValue cpv = g_Hook_VClient_RenderView.m_CamPath.Eval(t);

						pts[i].t = t;
						pts[i].y = Vector3(cpv.X, cpv.Y, cpv.Z);
						pts[i].nextPt = i+1 <c_CameraTrajectoryMaxPointsPerInterval ? &(pts[i+1]) : 0;
					}

					RamerDouglasPeucker(&(pts[0]), &(pts[c_CameraTrajectoryMaxPointsPerInterval-1]), c_CameraTrajectoryEpsilon);
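					// RamerDouglasPeucker presumably simplifies the densely sampled interval by unlinking
					// points whose deviation from the chord stays below c_CameraTrajectoryEpsilon from the
					// nextPt chain; the loop below then only walks the surviving points.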

					// add all points except the last one (to avoid duplicates):
					for(TempPoint * pt = &(pts[0]); pt && pt->nextPt; pt = pt->nextPt)
					{
						m_TrajectoryPoints.push_back(pt->t);
					}

					last = it;
				}

				// add last point:
				m_TrajectoryPoints.push_back(pts[c_CameraTrajectoryMaxPointsPerInterval-1].t);

				delete [] pts;

				m_RebuildDrawing = false;
			}

			newCScreenInfo[2] = c_CameraTrajectoryPixelWidth;
			m_Device->SetVertexShaderConstantF(48, newCScreenInfo, 1);

			AutoPolyLineStart();

			std::list<double>::iterator itPts = m_TrajectoryPoints.begin();

			CamPathIterator itKeysLast = g_Hook_VClient_RenderView.m_CamPath.GetBegin();
			CamPathIterator itKeysNext = itKeysLast;
			++itKeysNext;

			bool hasLastPt = false;
			bool hasNextPt = false;
			bool hasCurPt = false;
			
			double lastPtTime;
			CamPathValue lastPtValue;
			double curPtTime;
			CamPathValue curPtValue;
			double nextPtTime;
			CamPathValue nextPtValue;

			do
			{
				if(hasNextPt)
				{
					hasLastPt = true;
					lastPtTime = curPtTime;
					lastPtValue = curPtValue;

					hasCurPt = true;
					curPtTime = nextPtTime;
					curPtValue = nextPtValue;

					hasNextPt = false;
				}
				else
				{
					hasCurPt = true;
					curPtTime = *itPts;
					curPtValue = g_Hook_VClient_RenderView.m_CamPath.Eval(curPtTime);
					++itPts;
				}

				while(itKeysNext.GetTime() < curPtTime)
				{
					itKeysLast = itKeysNext;
					++itKeysNext;
				}

				if(itPts != m_TrajectoryPoints.end())
				{
					hasNextPt = true;
					nextPtTime = *itPts;
					nextPtValue = g_Hook_VClient_RenderView.m_CamPath.Eval(nextPtTime);
					++itPts;
				}
				else
				{
					// current point is last point.
					hasNextPt = false;
					nextPtValue = curPtValue;
				}

				if(!hasLastPt)
				{
					// current point is first point.
					lastPtValue = curPtValue;
				}

				// emit current point:
				{
					double deltaTime = fabs(curTime -curPtTime);

					DWORD colour;

					// determine colour:
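					// Colour ramp: points within one second of the current time fade from green to yellow
					// with high alpha, between one and two seconds from yellow to red with fading alpha,
					// and everything older is a dim red. ValToUCCondInv appears to invert the channel when
					// the evaluated point is selected, so selected parts render in the complementary colour.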
					if(deltaTime < 1.0)
					{
						double t = (deltaTime -0.0)/1.0;
						colour = D3DCOLOR_RGBA(
							ValToUCCondInv(255.0*t, curPtValue.Selected),
							ValToUCCondInv(255, curPtValue.Selected),
							ValToUCCondInv(0, curPtValue.Selected),
							(unsigned char)(127*(1.0-t))+128
						);
					}
					else
					if(deltaTime < 2.0)
					{
						double t = (deltaTime -1.0)/1.0;
						colour = D3DCOLOR_RGBA(
							ValToUCCondInv(255, curPtValue.Selected),
							ValToUCCondInv(255.0*(1.0-t), curPtValue.Selected),
							ValToUCCondInv(0, curPtValue.Selected),
							(unsigned char)(64*(1.0-t))+64
						);
					}
					else
					{
						colour = D3DCOLOR_RGBA(
							ValToUCCondInv(255, curPtValue.Selected),
							ValToUCCondInv(0, curPtValue.Selected),
							ValToUCCondInv(0, curPtValue.Selected),
							64
						);
					}

					AutoPolyLinePoint(
						Vector3(lastPtValue.X,lastPtValue.Y,lastPtValue.Z)
						, Vector3(curPtValue.X,curPtValue.Y,curPtValue.Z)
						, colour
						, Vector3(nextPtValue.X,nextPtValue.Y,nextPtValue.Z));
				}
			}
			while(hasNextPt);

			AutoPolyLineFlush();
		}

		// Draw keyframes:
		{
			newCScreenInfo[2] = c_CampathCrossPixelWidth;
			m_Device->SetVertexShaderConstantF(48, newCScreenInfo, 1);

			bool lpSelected = false;
			double lpTime;
			
			/*if(0 < g_Hook_VClient_RenderView.m_CamPath.GetSize())
			{
				// Test for not too unlikely hard case:
				CamPathValue cpv = g_Hook_VClient_RenderView.m_CamPath.GetBegin().GetValue();
				Vector3 current(cpv.X+76, cpv.Y+76, cpv.Z+76);
				Vector3 previous(current.X+76, current.Y-1*4, current.Z);
				Vector3 next(current.X+76, current.Y+1*4, current.Z);
				Vector3 next2(current.X, current.Y+2*4, current.Z);
				Vector3 next3(current.X+76, current.Y+3*4, current.Z);
				Vector3 next4(current.X, current.Y+4*4, current.Z);
				Vector3 next5(current.X+76, current.Y+5*4, current.Z);

				AutoPolyLineStart();
				AutoPolyLinePoint(previous, previous, D3DCOLOR_RGBA(255,0,0,255), current);
				AutoPolyLinePoint(previous, current, D3DCOLOR_RGBA(255,0,0,255), next);
				AutoPolyLinePoint(current, next, D3DCOLOR_RGBA(255,0,0,255), next2);
				AutoPolyLinePoint(next, next2, D3DCOLOR_RGBA(255,0,0,255), next3);
				AutoPolyLinePoint(next2, next3, D3DCOLOR_RGBA(255,0,0,255), next4);
				AutoPolyLinePoint(next3, next4, D3DCOLOR_RGBA(255,0,0,255), next5);
				AutoPolyLinePoint(next4, next5, D3DCOLOR_RGBA(255,0,0,255), next5);
				AutoPolyLineFlush();
			}*/
			
			/*if(0 < g_Hook_VClient_RenderView.m_CamPath.GetSize())
			{
				CamPathValue cpv = g_Hook_VClient_RenderView.m_CamPath.GetBegin().GetValue();

				float x = cpv.X * m_WorldToScreenMatrix.m[0][0] + cpv.Y * m_WorldToScreenMatrix.m[0][1] + cpv.Z * m_WorldToScreenMatrix.m[0][2] +m_WorldToScreenMatrix.m[0][3];
				float y = cpv.X * m_WorldToScreenMatrix.m[1][0] + cpv.Y * m_WorldToScreenMatrix.m[1][1] + cpv.Z * m_WorldToScreenMatrix.m[1][2] +m_WorldToScreenMatrix.m[1][3];
				float z = cpv.X * m_WorldToScreenMatrix.m[2][0] + cpv.Y * m_WorldToScreenMatrix.m[2][1] + cpv.Z * m_WorldToScreenMatrix.m[2][2] +m_WorldToScreenMatrix.m[2][3];
				float w = cpv.X * m_WorldToScreenMatrix.m[3][0] + cpv.Y * m_WorldToScreenMatrix.m[3][1] + cpv.Z * m_WorldToScreenMatrix.m[3][2] +m_WorldToScreenMatrix.m[3][3];

				float iw = w ? 1/w : 0;

				Tier0_Msg("pt: %f %f %f %f -> %f %f %f %f\n",x,y,z,w,x*iw,y*iw,z*iw,w*iw);
			}*/
		
			for(CamPathIterator it = g_Hook_VClient_RenderView.m_CamPath.GetBegin(); it != g_Hook_VClient_RenderView.m_CamPath.GetEnd(); ++it)
			{
				double cpT = it.GetTime();
				CamPathValue cpv = it.GetValue();

				cameraMightBeSelected = cameraMightBeSelected || (lpSelected && cpv.Selected && lpTime <= curTime && curTime <= cpT);

				lpSelected = cpv.Selected;
				lpTime = cpT;

				double deltaTime = fabs(curTime -cpT);

				bool selected = cpv.Selected;

				DWORD colour;

				// determine colour:
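				// Same time-based colour ramp as for the trajectory points above.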
				if(deltaTime < 1.0)
				{
					double t = (deltaTime -0.0)/1.0;
					colour = D3DCOLOR_RGBA(
						ValToUCCondInv(255.0*t, selected),
						ValToUCCondInv(255, selected),
						ValToUCCondInv(0, selected),
						(unsigned char)(127*(1.0-t))+128
					);
				}
				else
				if(deltaTime < 2.0)
				{
					double t = (deltaTime -1.0)/1.0;
					colour = D3DCOLOR_RGBA(
						ValToUCCondInv(255, selected),
						ValToUCCondInv(255.0*(1.0-t), selected),
						ValToUCCondInv(0, selected),
						(unsigned char)(64*(1.0-t))+64
					);
				}
				else
				{
					colour = D3DCOLOR_RGBA(
						ValToUCCondInv(255, selected),
						ValToUCCondInv(0, selected),
						ValToUCCondInv(0, selected),
						64
					);
				}

				// x / forward line:

				AutoSingleLine(
					Vector3(cpv.X -c_CampathCrossRadius, cpv.Y, cpv.Z),
					colour,
					Vector3(cpv.X +c_CampathCrossRadius, cpv.Y, cpv.Z),
					colour
				);

				// y / left line:

				AutoSingleLine(
					Vector3(cpv.X, cpv.Y -c_CampathCrossRadius, cpv.Z),
					colour,
					Vector3(cpv.X, cpv.Y +c_CampathCrossRadius, cpv.Z),
					colour
				);

				// z / up line:

				AutoSingleLine(
					Vector3(cpv.X, cpv.Y, cpv.Z -c_CampathCrossRadius),
					colour,
					Vector3(cpv.X, cpv.Y, cpv.Z +c_CampathCrossRadius),
					colour
				);
			}

			AutoSingleLineFlush();
		}

		// Draw wireframe camera:
		if(inCampath && campathCanEval)
		{
			newCScreenInfo[2] = c_CameraPixelWidth;
			m_Device->SetVertexShaderConstantF(48, newCScreenInfo, 1);

			DWORD colourCam = campathEnabled
				? D3DCOLOR_RGBA(
					ValToUCCondInv(255,cameraMightBeSelected),
					ValToUCCondInv(0,cameraMightBeSelected),
					ValToUCCondInv(255,cameraMightBeSelected),
					128)
				: D3DCOLOR_RGBA(
					ValToUCCondInv(255,cameraMightBeSelected),
					ValToUCCondInv(255,cameraMightBeSelected),
					ValToUCCondInv(255,cameraMightBeSelected),
					128);
			DWORD colourCamUp = campathEnabled
				? D3DCOLOR_RGBA(
					ValToUCCondInv(0,cameraMightBeSelected),
					ValToUCCondInv(255,cameraMightBeSelected),
					ValToUCCondInv(0,cameraMightBeSelected),
					128)
				: D3DCOLOR_RGBA(
					ValToUCCondInv(0,cameraMightBeSelected),
					ValToUCCondInv(0,cameraMightBeSelected),
					ValToUCCondInv(0,cameraMightBeSelected),
					128);

			CamPathValue cpv = g_Hook_VClient_RenderView.m_CamPath.Eval(curTime);

			// clamp Fov to the same range as the RenderView hook:
			cpv.Fov = max(1,cpv.Fov);
			cpv.Fov = min(179,cpv.Fov);

			double forward[3], right[3], up[3];
			QEulerAngles ang = cpv.R.ToQREulerAngles().ToQEulerAngles();
			MakeVectors(ang.Roll, ang.Pitch, ang.Yaw, forward, right, up);

			Vector3 vCp(cpv.X, cpv.Y, cpv.Z);
			Vector3 vForward(forward);
			Vector3 vUp(up);
			Vector3 vRight(right);

			//Tier0_Msg("----------------",curTime);
			//Tier0_Msg("currenTime = %f",curTime);
			//Tier0_Msg("vCp = %f %f %f\n", vCp.X, vCp.Y, vCp.Z);

			double a = sin(cpv.Fov * M_PI / 360.0) * c_CameraRadius;
			double b = a;

			int screenWidth, screenHeight;
			g_VEngineClient->GetScreenSize(screenWidth, screenHeight);

			double aspectRatio = screenWidth ? (double)screenHeight / (double)screenWidth : 1.0;

			b *= aspectRatio;
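			// a and b are the half-extents of the camera's image rectangle at distance c_CameraRadius
			// (a = c_CameraRadius * sin(Fov/2), b additionally scaled by the screen aspect ratio); the
			// corner points below span that rectangle and vMUU marks the camera's up direction above
			// its top edge.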

			Vector3 vLU = vCp +(double)c_CameraRadius * vForward -a * vRight +b * vUp;
			Vector3 vRU = vCp +(double)c_CameraRadius * vForward +a * vRight +b * vUp;
			Vector3 vLD = vCp +(double)c_CameraRadius * vForward -a * vRight -b * vUp;
			Vector3 vRD = vCp +(double)c_CameraRadius * vForward +a * vRight -b * vUp;
			Vector3 vMU = vLU +(vRU -vLU)/2;
			Vector3 vMUU = vMU +(double)c_CameraRadius * vUp;

			AutoSingleLine(vCp, colourCam, vLD, colourCam);

			AutoSingleLine(vCp, colourCam, vRD, colourCam);

			AutoSingleLine(vCp, colourCam, vLU, colourCam);

			AutoSingleLine(vCp, colourCam, vRU, colourCam);

			AutoSingleLine(vLD, colourCam, vRD, colourCam);

			AutoSingleLine(vRD, colourCam, vRU, colourCam);

			AutoSingleLine(vRU, colourCam, vLU, colourCam);

			AutoSingleLine(vLU, colourCam, vLD, colourCam);

			AutoSingleLine(vMU, colourCam, vMUU, colourCamUp);

			AutoSingleLineFlush();

			//
			/*

			colourCam = D3DCOLOR_RGBA(255, 0, 0, 255);
			colourCamUp = D3DCOLOR_RGBA(255, 255, 0, 255);

			vCp = vvPos;
			vForward = vvForward;
			vUp = vvUp;
			vRight = vvRight;

			//Tier0_Msg("vCp2 = %f %f %f\n", vCp.X, vCp.Y, vCp.Z);

			vLU = vCp +(double)c_CameraRadius * vForward -a * vRight +b * vUp;
			vRU = vCp +(double)c_CameraRadius * vForward +a * vRight +b * vUp;
			vLD = vCp +(double)c_CameraRadius * vForward -a * vRight -b * vUp;
			vRD = vCp +(double)c_CameraRadius * vForward +a * vRight -b * vUp;
			vMU = vLU +(vRU -vLU)/2;
			vMUU = vMU +(double)c_CameraRadius * vUp;

			AutoSingleLine(vCp, colourCam, vLD, colourCam);

			AutoSingleLine(vCp, colourCam, vRD, colourCam);

			AutoSingleLine(vCp, colourCam, vLU, colourCam);

			AutoSingleLine(vCp, colourCam, vRU, colourCam);

			AutoSingleLine(vLD, colourCam, vRD, colourCam);

			AutoSingleLine(vRD, colourCam, vRU, colourCam);

			AutoSingleLine(vRU, colourCam, vLU, colourCam);

			AutoSingleLine(vLU, colourCam, vLD, colourCam);

			AutoSingleLine(vMU, colourCam, vMUU, colourCamUp);

			AutoSingleLineFlush();
			*/
		}
	}

	// Restore device state:

	m_Device->SetPixelShader(oldPixelShader);
	if(oldPixelShader) oldPixelShader->Release();

	m_Device->SetVertexShader(oldVertexShader);
	if(oldVertexShader) oldVertexShader->Release();

	m_Device->SetStreamSource(0, oldVertexBuffer, oldVertexBufferOffset, oldVertexBufferStride);
	if(oldVertexBuffer) oldVertexBuffer->Release();

	m_Device->SetIndices(oldIndexBuffer);
	if(oldIndexBuffer) oldIndexBuffer->Release();

	m_Device->SetFVF(oldFVF);

	m_Device->SetVertexDeclaration(oldDeclaration);
	if(oldDeclaration) oldDeclaration->Release();

	m_Device->SetVertexShaderConstantF(8, oldCViewProj[0], 4);
	m_Device->SetVertexShaderConstantF(48, oldCScreenInfo, 1);
	m_Device->SetVertexShaderConstantF(49, oldCPlane0, 1);
	m_Device->SetVertexShaderConstantF(50, oldCPlaneN, 1);

	m_Device->SetRenderState(D3DRS_CULLMODE, oldCullMode);
	m_Device->SetRenderState(D3DRS_DESTBLEND, oldDestBlend);
	m_Device->SetRenderState(D3DRS_SRCBLEND, oldSrcBlend);
	m_Device->SetRenderState(D3DRS_BLENDOP, oldBlendOp);
	m_Device->SetRenderState(D3DRS_ALPHABLENDENABLE, oldAlphaBlendEnable);
	m_Device->SetRenderState(D3DRS_SEPARATEALPHABLENDENABLE, oldSeparateAlphaBlendEnable);
	m_Device->SetRenderState(D3DRS_ALPHATESTENABLE, oldAlphaTestEnable);
	m_Device->SetRenderState(D3DRS_ZFUNC, oldZFunc);
	m_Device->SetRenderState(D3DRS_ZWRITEENABLE, oldZWriteEnable);
	m_Device->SetRenderState(D3DRS_ZENABLE, oldZEnable);
	m_Device->SetRenderState(D3DRS_COLORWRITEENABLE, oldColorWriteEnable);
	m_Device->SetRenderState(D3DRS_SRGBWRITEENABLE, oldSrgbWriteEnable);
}
Example n. 13
0
//--------------------------------------------------------------
void ofApp::setup(){
	ofSetLogLevel(OF_LOG_VERBOSE);
	
	glEnable(GL_DEPTH_TEST);
	
	pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
	vector<pcl::PointCloud<pcl::PointXYZ>::Ptr> clouds;
	
	mesh.load(ofToDataPath("out.ply"));
	cloud = ofxPCL::toPCL<ofxPCL::PointXYZCloud>(mesh);
	
	textures.resize(2);
	textures.at(0).loadImage(ofToDataPath("tex0.jpg"));
	textures.at(1).loadImage(ofToDataPath("tex1.jpg"));
	
	pcl::SacModel model_type = pcl::SACMODEL_PLANE;
	float distance_threshold = 30;
	int min_points_limit = 10;
	int max_segment_count = 30;
	
	pcl::ModelCoefficients::Ptr coefficients(new pcl::ModelCoefficients());
	pcl::PointIndices::Ptr inliers(new pcl::PointIndices());
	
	pcl::SACSegmentation<pcl::PointXYZ> seg;
	seg.setOptimizeCoefficients(false);
	
	seg.setModelType(model_type);
	seg.setMethodType(pcl::SAC_RANSAC);
	seg.setDistanceThreshold(distance_threshold);
	seg.setMaxIterations(500);
	
	pcl::PointCloud<pcl::PointXYZ>::Ptr temp(new pcl::PointCloud<pcl::PointXYZ>(*cloud));
	const size_t original_szie = temp->points.size();
	
	pcl::ExtractIndices<pcl::PointXYZ> extract;
	
	int segment_count = 0;
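	// Iteratively fit planes with RANSAC: each pass segments the dominant remaining plane, keeps its
	// inliers as one cloud/mesh, removes them from temp, and repeats until roughly 70% of the original
	// points are consumed, the inlier count drops below min_points_limit, or max_segment_count passes
	// have run.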
	while (temp->size() > original_szie * 0.3)
	{
		if (segment_count > max_segment_count) break;
		segment_count++;
		
		seg.setInputCloud(temp);
		seg.segment(*inliers, *coefficients);
		
		if (inliers->indices.size() < min_points_limit)
			break;
		
		pcl::PointCloud<pcl::PointXYZ>::Ptr filterd_point_cloud(new pcl::PointCloud<pcl::PointXYZ>);
		
		extract.setInputCloud(temp);
		extract.setIndices(inliers);
		extract.setNegative(false);
		extract.filter(*filterd_point_cloud);
		
		if (filterd_point_cloud->points.size() > 0)
		{
			clouds.push_back(filterd_point_cloud);
		}
		
		extract.setNegative(true);
		extract.filter(*temp);
		
		ofMesh m;
		pcl::PointCloud<pcl::PointNormal>::Ptr cloud_with_normals(new pcl::PointCloud<pcl::PointNormal>);
		
		ofxPCL::normalEstimation(filterd_point_cloud, cloud_with_normals);
		
		m = ofxPCL::triangulate(cloud_with_normals, 100);
		m.clearColors();
		jointMesh.addVertices(m.getVertices());
		jointMesh.addColors(m.getColors());
		
		ofVec3f center = m.getCentroid();
		ofVec3f planeNormal(coefficients->values[0], coefficients->values[1], coefficients->values[2]);
		
		ofVec3f tangent, bitangent;
		ofVec3f arb(0, 1, 0);
		tangent = arb.cross(planeNormal).normalize();
		bitangent = planeNormal.cross(tangent).normalize();
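		// Build an orthonormal basis on the fitted plane from an arbitrary up vector and project each
		// vertex onto it to obtain tiling texture coordinates (scaled by 0.001 and wrapped into [0,1)).
		// Note this choice of arb degenerates if the plane normal is parallel to (0, 1, 0).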
		
		for(int j = 0; j < m.getNumVertices(); j++) {
			float x = m.getVertex(j).dot(tangent) * 0.001;
			x = x - (long)x;
			if(x < 0) x += 1;
			float y = m.getVertex(j).dot(bitangent) * 0.001;
			y = y - (long)y;
			if(y < 0) y += 1;
			m.addTexCoord(ofVec2f(x, y));
		}
		meshes.push_back(m);
	}
	
	ofxPCL::convert(temp, meshResidual);
	
	ofLogVerbose() << clouds.size() << " meshes extracted";
	
	center = jointMesh.getCentroid();
}
Example n. 14
0
Intersection* Plane3D::testRayIntersection(Ray r)
{
	/*std::cout << "ray start pos " << r.startPosition.x << " " << r.startPosition.y << " " << r.startPosition.z << std::endl;
	std::cout << "ray direction " << r.direction.x << " " << r.direction.y << " " << r.direction.z << std::endl;*/
	//The ray is described by: point on ray = start(O) + direction(D) * t
	//If a point B on the ray hits a surface, it must lie in the plane of that surface.
	//We can check whether B lies in the plane by building a vector from a point on the surface (A) to B
	//and dotting it with the plane's normal; if it is zero,
	//(A - B) . n = 0,
	//then the point B is on the plane.

	//We want to know at which t-value the point B on the ray lies in the plane.
	//Substituting B = O + t*D into the plane equation and solving for t gives
	//t = (A - O) dot n / (D dot n), which the code below evaluates as (O - A) dot n / (-D dot n).

	//translate the plane to its local coordinate system, rotate it, and translate it back
	glm::mat4 translation = glm::translate(glm::mat4(1.f), -position);
	glm::mat4 translationBack = glm::translate(glm::mat4(1.f), position);
	glm::mat4 rotat = glm::rotate(glm::rotate(glm::rotate(glm::mat4(1.f), -rotation.x, glm::vec3(1, 0, 0)), -rotation.y, glm::vec3(0, 1, 0)), -rotation.z, glm::vec3(0, 0, 1));
	glm::mat4 toLocal = rotat * translation;
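	// Applied to a point of the form position + offset, translationBack * toLocal first moves the
	// plane's centre to the origin, applies the rotation (built from the negated Euler angles), and
	// moves it back, so the corner points below end up at position + R * localCornerOffset.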

	glm::vec4 temp(translationBack * toLocal * glm::vec4(position + glm::vec3(-dimensions.x / 2, -dimensions.y / 2, 0), 1));
	glm::vec3 lowerLeftCorner(temp.x, temp.y, temp.z);
	temp = glm::vec4(translationBack * toLocal * glm::vec4(position + glm::vec3(dimensions.x / 2, -dimensions.y / 2, 0), 1));
	glm::vec3 lowerRightCorner(temp.x, temp.y, temp.z);
	temp = glm::vec4(translationBack * toLocal * glm::vec4(position + glm::vec3(-dimensions.x / 2, dimensions.y / 2, 0), 1));
	glm::vec3 upperLeftCorner(temp.x, temp.y, temp.z);

	//Find plane normal
	glm::vec3 planeNormal(glm::normalize(glm::cross(lowerRightCorner - lowerLeftCorner, upperLeftCorner - lowerLeftCorner)));

	/*std::cout << "lower left " << lowerLeftCorner.x << " " << lowerLeftCorner.y << " " << lowerLeftCorner.z << std::endl;
	std::cout << "lower right " << lowerRightCorner.x << " " << lowerRightCorner.y << " " << lowerRightCorner.z << std::endl;
	std::cout << "upper left " << upperLeftCorner.x << " " << upperLeftCorner.y << " " << upperLeftCorner.z << std::endl;
	std::cout << "ray start pos " << r.startPosition.x << " " << r.startPosition.y << " " << r.startPosition.z << std::endl;
	std::cout << "ray direction " << r.direction.x << " " << r.direction.y << " " << r.direction.z << std::endl;*/

	//If D dot n == 0 the direction goes along the plane and the ray won't hit the plane
	//If D dot n < 0 the surface can't be seen from the ray's starting point
	//But if the ray is a shadow ray it doesn't matter if we can't see the object, just that it is in the way
	float DdotN = glm::dot(-r.direction, planeNormal);
	if (r.shadowRay)
	{
		if (DdotN < EPSILON && DdotN > -EPSILON)
			return nullptr;
	}
	else
		if (DdotN < EPSILON){
			//std::cout << "DdotN <= 0 " << std::endl;
			return nullptr;
		}

	//std::cout << "ray direction " << r.direction.x << " " << r.direction.y << " " << r.direction.z << std::endl;
	//std::cout << "planeNormal " << planeNormal.x << " " << planeNormal.y << " " << planeNormal.z << std::endl;
	//Find point inside plane -> position, which is the center of the plane
	//Find intersection
	float t;
	if (r.startPosition - position != planeNormal)
	{
		t = glm::dot(r.startPosition - position, planeNormal) / DdotN;
		//std::cout << "1t = " << t << std::endl;
	}
	else
	{
		glm::vec3 temp = r.startPosition - lowerLeftCorner;
		//std::cout << "vector on plane? " << temp.x << " " << temp.y << " " << temp.z << std::endl;
		t = glm::dot(r.startPosition - lowerLeftCorner, planeNormal) / DdotN;
		//std::cout << "2t = " << t << " dot " << glm::dot(r.startPosition - lowerLeftCorner, planeNormal) << std::endl;
	}

	//If t < 0 the plane is behind or inside the ray origin and we aren't interested in it
	if (t < EPSILON){
		//std::cout << "t <= 0 " << std::endl;
		return nullptr;
	}

	//Is B inside the surface bounds?
	//Create vectors that describe the bounds
	//"Inside" is on the right side of the vector
	temp = glm::vec4(translationBack * toLocal * glm::vec4(position + glm::vec3(dimensions.x / 2, dimensions.y / 2, 0), 1));
	glm::vec3 upperRightCorner(temp.x, temp.y, temp.z);
	//std::cout << "upper right " << upperRightCorner.x << " " << upperRightCorner.y << " " << upperRightCorner.z << std::endl;

	glm::vec3 leftSide(upperLeftCorner - lowerLeftCorner);
	glm::vec3 topSide(upperRightCorner - upperLeftCorner);
	glm::vec3 rightSide(lowerRightCorner - upperRightCorner);
	glm::vec3 bottomSide(lowerLeftCorner - lowerRightCorner);

	//To know if B is on the correct side of a bounding vector we take the cross product of
	//the bounding vector and a vector that starts at the same point and ends at B.
	//If the result points along the plane's normal, B is on the correct side of the bounding vector;
	//if it points against the plane's normal, B is on the wrong side.
	//To test whether the result points along the normal we take the dot product between them:
	//if it is greater than zero, they point in the same direction.

	//leftSide
	glm::vec3 B(r.startPosition + t * r.direction);
	glm::vec3 Bvector(B - lowerLeftCorner);
	if (glm::dot(glm::cross(Bvector, leftSide), planeNormal) > 0)
	{
		//topSide
		Bvector = glm::vec3(B - upperLeftCorner);
		if (glm::dot(glm::cross(Bvector, topSide), planeNormal) > 0)
		{
			//rightSide
			Bvector = glm::vec3(B - upperRightCorner);
			if (glm::dot(glm::cross(Bvector, rightSide), planeNormal) > 0)
			{
				//bottomSide
				Bvector = glm::vec3(B - lowerRightCorner);
				if (glm::dot(glm::cross(Bvector, bottomSide), planeNormal) > 0)
				{
					// If the object is blocked
					if (t > r.tMax){
						//std::cout << "t > r.tMax" << std::endl;
						return nullptr;
					}

					//else
					r.tMax = t;
					glm::vec3 intersectionPoint(r.startPosition + t * r.direction);
					//std::cout << "intersected object" << std::endl;
					return new Intersection(intersectionPoint, planeNormal, color, reflectionCoef, t);
				}
			}
		}
	}

	//std::cout << "not inside bounds" << std::endl;
	return nullptr;
}
Example n. 15
0
void	ColladaConverter::ConvertRigidBodyRef( btRigidBodyInput& rbInput,btRigidBodyOutput& rbOutput)
{

	const domRigid_body::domTechnique_commonRef techniqueRef = rbInput.m_rigidBodyRef2->getTechnique_common();
	if (techniqueRef)
	{

		if (techniqueRef->getMass())
		{
			rbOutput.m_mass = (float) techniqueRef->getMass()->getValue();
		}
		if (techniqueRef->getDynamic())
		{
			rbOutput.m_isDynamics = techniqueRef->getDynamic()->getValue();
		}
		//a hack to interpret <extra> PhysX profile:
		//when <kinematic> is true, make <dynamic> false...
		//using the DOM is a pain...
		const domExtra_Array& extraArray = rbInput.m_rigidBodyRef2->getExtra_array();
		unsigned int s=0;

		for (s = 0;s< extraArray.getCount();s++)
		{
			const domExtraRef extraRef = extraArray[s];
			const domTechnique_Array techniqueArray = extraRef->getTechnique_array();
			unsigned int t=0;
			for (t=0;t<techniqueArray.getCount();t++)
			{
				const domTechniqueRef techRef = techniqueArray[t];
				const daeElementRefArray elemRefArray = techRef->getContents();
				unsigned int u = 0;
				for (u=0;u<elemRefArray.getCount();u++)
				{
					daeElementRef elemRef = elemRefArray[u];
					daeString elemName = elemRef->getElementName();
					if (elemName && !strcmp(elemName,"kinematic"))
					{
						//how can I make this cast safe?
						const domAny* myAny = (const domAny*)elemRef.cast();
						daeString myVal = myAny->getValue();
						if (myVal)
						{
							if (!strcmp(myVal,"true"))
							{
								printf("revert bug in PhysX .dae export -> <kinematic>true</kinematic> means <dynamic>false</dynamic>\n");
								rbOutput.m_isDynamics = false;
							}
						}
					}
				}
			}
		}

		//shapes
		for (s=0;s<techniqueRef->getShape_array().getCount();s++)
		{
			domRigid_body::domTechnique_common::domShapeRef shapeRef = techniqueRef->getShape_array()[s];

			if (shapeRef->getPlane())
			{
				domPlaneRef planeRef = shapeRef->getPlane();
				if (planeRef->getEquation())
				{
					const domFloat4 planeEq = planeRef->getEquation()->getValue();
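					// The <plane> equation is a float4 (a, b, c, d): (a, b, c) becomes the plane normal
					// and the fourth component, scaled by the unit-to-meter factor, the plane constant
					// passed to btStaticPlaneShape.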
					btVector3 planeNormal((btScalar)planeEq.get(0),(btScalar)planeEq.get(1),(btScalar)planeEq.get(2));
					btScalar planeConstant = (btScalar)planeEq.get(3)*(btScalar)m_unitMeterScaling;
					rbOutput.m_colShape = new btStaticPlaneShape(planeNormal,planeConstant);
				}

			}

			if (shapeRef->getBox())
			{
				domBoxRef boxRef = shapeRef->getBox();
				domBox::domHalf_extentsRef	domHalfExtentsRef = boxRef->getHalf_extents();
				domFloat3& halfExtents = domHalfExtentsRef->getValue();
				btScalar x = (btScalar)halfExtents.get(0)*m_unitMeterScaling;
				btScalar y = (btScalar)halfExtents.get(1)*m_unitMeterScaling;
				btScalar z = (btScalar)halfExtents.get(2)*m_unitMeterScaling;
				rbOutput.m_colShape = new btBoxShape(btVector3(x,y,z));
			}
			if (shapeRef->getSphere())
			{
				domSphereRef sphereRef = shapeRef->getSphere();
				domSphere::domRadiusRef radiusRef = sphereRef->getRadius();
				btScalar radius = (btScalar)radiusRef->getValue()*m_unitMeterScaling;
				rbOutput.m_colShape = new btSphereShape(radius);
			}

			if (shapeRef->getCylinder())
			{
				domCylinderRef cylinderRef = shapeRef->getCylinder();
				domFloat height = cylinderRef->getHeight()->getValue()*m_unitMeterScaling;
				domFloat2 radius2 = cylinderRef->getRadius()->getValue();
				domFloat radius0 = radius2.get(0)*m_unitMeterScaling;

				//Cylinder around the local Y axis
				rbOutput.m_colShape = new btCylinderShapeZ(btVector3((btScalar)radius0,(btScalar)height,(btScalar)radius0));

			}

			if (shapeRef->getInstance_geometry())
			{
				const domInstance_geometryRef geomInstRef = shapeRef->getInstance_geometry();
				daeElement* geomElem = geomInstRef->getUrl().getElement();
				//elemRef->getTypeName();
				domGeometry* geom = (domGeometry*) geomElem;
				if (geom && geom->getMesh())
				{
					const domMeshRef meshRef = geom->getMesh();

					//it can be either triangle mesh, or we just pick the vertices/positions

					if (meshRef->getTriangles_array().getCount())
					{

						btTriangleMesh* trimesh = new btTriangleMesh();

						for (unsigned int tg = 0;tg<meshRef->getTriangles_array().getCount();tg++)
						{


							domTrianglesRef triRef = meshRef->getTriangles_array()[tg];
							const domPRef pRef = triRef->getP();
							btIndexedMesh meshPart;
							meshPart.m_triangleIndexStride=0;



							int vertexoffset = -1;
							domInputLocalOffsetRef indexOffsetRef;


							for (unsigned int w=0;w<triRef->getInput_array().getCount();w++)
							{
								domUint offset = triRef->getInput_array()[w]->getOffset();
								daeString str = triRef->getInput_array()[w]->getSemantic();
								if (!strcmp(str,"VERTEX"))
								{
									indexOffsetRef = triRef->getInput_array()[w];
									vertexoffset = (int) offset;
								}
								if ((int) offset > (int) meshPart.m_triangleIndexStride)
								{
									meshPart.m_triangleIndexStride = (int) offset;
								}
							}
							meshPart.m_triangleIndexStride++;
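							// Each entry in <p> is a tuple of one index per input; the stride is the
							// largest input offset + 1, and vertexoffset marks where the VERTEX index
							// sits inside each tuple. The position lookup below therefore starts at
							// vertexoffset and advances by the full stride per vertex.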
							domListOfUInts indexArray =triRef->getP()->getValue();

							//int*		m_triangleIndexBase;



							meshPart.m_numTriangles = (int) triRef->getCount();

							const domVerticesRef vertsRef = meshRef->getVertices();
							size_t numInputs = vertsRef->getInput_array().getCount();
							for (size_t i=0;i<numInputs;i++)
							{
								domInputLocalRef localRef = vertsRef->getInput_array()[i];
								daeString str = localRef->getSemantic();
								if ( !strcmp(str,"POSITION"))
								{
									domURIFragmentType& frag = localRef->getSource();

									daeElementConstRef constElem = frag.getElement();

									const domSourceRef node = *(const domSourceRef*)&constElem;
									const domFloat_arrayRef flArray = node->getFloat_array();
									if (flArray)
									{
										const domListOfFloats& listFloats = flArray->getValue();

										int k=vertexoffset;
										int t=0;
										int vertexStride = 3;//instead of hardcoded stride, should use the 'accessor'
										for (;t<meshPart.m_numTriangles;t++)
										{
											btVector3 verts[3];
											int index0;
											for (int i=0;i<3;i++)
											{
												index0 = (int) indexArray.get(k)*vertexStride;
												domFloat fl0 = listFloats.get(index0);
												domFloat fl1 = listFloats.get(index0+1);
												domFloat fl2 = listFloats.get(index0+2);
												k+=meshPart.m_triangleIndexStride;
												verts[i].setValue((btScalar)fl0*m_unitMeterScaling,(btScalar)fl1*m_unitMeterScaling,(btScalar)fl2*m_unitMeterScaling);
											}
											trimesh->addTriangle(verts[0],verts[1],verts[2]);
										}
									}
								}
							}



							if (rbOutput.m_isDynamics)
							{
								printf("moving concave <mesh> not supported, transformed into convex\n");
								rbOutput.m_colShape = new btConvexTriangleMeshShape(trimesh);
							} else
							{
								printf("static concave triangle <mesh> added\n");
								bool useQuantizedAabbCompression = false;
								rbOutput.m_colShape = new btBvhTriangleMeshShape(trimesh,useQuantizedAabbCompression);

							}

						}
					} else
						{

							btConvexHullShape* convexHull = new btConvexHullShape();
							int numAddedVerts = 0;

							const domVerticesRef vertsRef = meshRef->getVertices();
							size_t numInputs = vertsRef->getInput_array().getCount();
							for (size_t i=0;i<numInputs;i++)
							{
								domInputLocalRef localRef = vertsRef->getInput_array()[i];
								daeString str = localRef->getSemantic();
								if ( !strcmp(str,"POSITION"))
								{
									domURIFragmentType& frag = localRef->getSource();

									daeElementConstRef constElem = frag.getElement();

									const domSourceRef node = *(const domSourceRef*)&constElem;
									const domFloat_arrayRef flArray = node->getFloat_array();
									if (flArray)
									{
										const domListOfFloats& listFloats = flArray->getValue();
										int vertexStride = 3;//instead of hardcoded stride, should use the 'accessor'
										size_t vertIndex = 0;
										for (vertIndex = 0;vertIndex < listFloats.getCount();vertIndex+=vertexStride)
										{
											domFloat fl0 = listFloats.get(vertIndex);
											domFloat fl1 = listFloats.get(vertIndex+1);
											domFloat fl2 = listFloats.get(vertIndex+2);
											convexHull->addPoint(btPoint3((btScalar)fl0,(btScalar)fl1,(btScalar)fl2) * m_unitMeterScaling);
											numAddedVerts++;
										}
									}
								}
							}
							//convexHull->addPoint();
							if (numAddedVerts > 0)
							{
								rbOutput.m_colShape = convexHull;
							} else
							{
								delete convexHull;
								printf("no vertices found for convex hull\n");
							}

					}


				}

				if (geom && geom->getConvex_mesh())
				{

					{
						const domConvex_meshRef convexRef = geom->getConvex_mesh();
						daeElementRef otherElemRef = convexRef->getConvex_hull_of().getElement();
						if ( otherElemRef != NULL )
						{
							domGeometryRef linkedGeom = *(domGeometryRef*)&otherElemRef;
							printf( "otherLinked\n");
						} else
						{
							printf("convexMesh polyCount = %i\n",convexRef->getPolygons_array().getCount());
							printf("convexMesh triCount = %i\n",convexRef->getTriangles_array().getCount());

						}
					}



					btConvexHullShape* convexHullShape = new btConvexHullShape(0,0);

					//it is quite a trick to get to the vertices, using Collada.
					//we are not there yet...

					const domConvex_meshRef convexRef = geom->getConvex_mesh();
					//daeString urlref = convexRef->getConvex_hull_of().getURI();
					daeString urlref2 = convexRef->getConvex_hull_of().getOriginalURI();
					if (urlref2)
					{
						daeElementRef otherElemRef = convexRef->getConvex_hull_of().getElement();
						//	if ( otherElemRef != NULL )
						//	{
						//		domGeometryRef linkedGeom = *(domGeometryRef*)&otherElemRef;

						// Load all the geometry libraries
						for ( unsigned int i = 0; i < m_dom->getLibrary_geometries_array().getCount(); i++)
						{
							domLibrary_geometriesRef libgeom = m_dom->getLibrary_geometries_array()[i];
							//int index = libgeom->findLastIndexOf(urlref2);
							//can't find it

							for ( unsigned int  i = 0; i < libgeom->getGeometry_array().getCount(); i++)
							{
								//ReadGeometry(  );
								domGeometryRef lib = libgeom->getGeometry_array()[i];
								if (!strcmp(lib->getId(),urlref2))
								{
									//found convex_hull geometry
									domMesh			*meshElement		= lib->getMesh();//linkedGeom->getMesh();
									if (meshElement)
									{
										const domVerticesRef vertsRef = meshElement->getVertices();
										size_t numInputs = vertsRef->getInput_array().getCount();
										for (size_t i=0;i<numInputs;i++)
										{
											domInputLocalRef localRef = vertsRef->getInput_array()[i];
											daeString str = localRef->getSemantic();
											if ( !strcmp(str,"POSITION"))
											{
												domURIFragmentType& frag = localRef->getSource();

												daeElementConstRef constElem = frag.getElement();

												const domSourceRef node = *(const domSourceRef*)&constElem;
												const domFloat_arrayRef flArray = node->getFloat_array();
												if (flArray)
												{
													int numElem = (int) flArray->getCount();
													const domListOfFloats& listFloats = flArray->getValue();

													for (int k=0;k+2<numElem;k+=3)
													{
														domFloat fl0 = listFloats.get(k);
														domFloat fl1 = listFloats.get(k+1);
														domFloat fl2 = listFloats.get(k+2);
														//printf("float %f %f %f\n",fl0,fl1,fl2);

														convexHullShape->addPoint(btPoint3((btScalar)fl0,(btScalar)fl1,(btScalar)fl2) * m_unitMeterScaling);
													}
												}
											}
										}
									}
								}
							}
						}
					} else
					{
						//no getConvex_hull_of but direct vertices
						const domVerticesRef vertsRef = convexRef->getVertices();
						size_t numInputs = vertsRef->getInput_array().getCount();
						for (size_t i=0;i<numInputs;i++)
						{
							domInputLocalRef localRef = vertsRef->getInput_array()[i];
							daeString str = localRef->getSemantic();
							if ( !strcmp(str,"POSITION"))
							{
								domURIFragmentType& frag = localRef->getSource();

								daeElementConstRef constElem = frag.getElement();

								const domSourceRef node = *(const domSourceRef*)&constElem;
								const domFloat_arrayRef flArray = node->getFloat_array();
								if (flArray)
								{
									int numElem = (int) flArray->getCount();
									const domListOfFloats& listFloats = flArray->getValue();

									for (int k=0;k+2<numElem;k+=3)
									{
										domFloat fl0 = listFloats.get(k);
										domFloat fl1 = listFloats.get(k+1);
										domFloat fl2 = listFloats.get(k+2);
										//printf("float %f %f %f\n",fl0,fl1,fl2);

										convexHullShape->addPoint(btPoint3((btScalar)fl0,(btScalar)fl1,(btScalar)fl2)*m_unitMeterScaling);
									}

								}

							}


						}


					}

					if (convexHullShape->getNumVertices())
					{
						rbOutput.m_colShape = convexHullShape;
						printf("created convexHullShape with %i points\n",convexHullShape->getNumVertices());
					} else
					{
						delete convexHullShape;
						printf("failed to create convexHullShape\n");
					}


					//domGeometryRef linkedGeom = *(domGeometryRef*)&otherElemRef;

					printf("convexmesh\n");

				}
			}

			//if more then 1 shape, or a non-identity local shapetransform
			//use a compound

			bool hasShapeLocalTransform = ((shapeRef->getRotate_array().getCount() > 0) ||
				(shapeRef->getTranslate_array().getCount() > 0));

			if (rbOutput.m_colShape)
			{
				if ((techniqueRef->getShape_array().getCount()>1) ||
					(hasShapeLocalTransform))
				{

					if (!rbOutput.m_compoundShape)
					{
						rbOutput.m_compoundShape = new btCompoundShape();
					}

					btTransform localTransform;
					localTransform.setIdentity();
					if (hasShapeLocalTransform)
					{
					localTransform = GetbtTransformFromCOLLADA_DOM(
						emptyMatrixArray,
						shapeRef->getRotate_array(),
						shapeRef->getTranslate_array(),
						m_unitMeterScaling
						);
					}

					rbOutput.m_compoundShape->addChildShape(localTransform,rbOutput.m_colShape);
					rbOutput.m_colShape = 0;
				}
			}


		}//for each shape
	}


}
bool	btPolyhedralConvexShape::initializePolyhedralFeatures()
{
	if (m_polyhedron)
		btAlignedFree(m_polyhedron);
	
	void* mem = btAlignedAlloc(sizeof(btConvexPolyhedron),16);
	m_polyhedron = new (mem) btConvexPolyhedron;

	btAlignedObjectArray<btVector3> tmpVertices;
	for (int i=0;i<getNumVertices();i++)
	{
		btVector3& newVertex = tmpVertices.expand();
		getVertex(i,newVertex);
	}

	btConvexHullComputer conv;
	conv.compute(&tmpVertices[0].getX(), sizeof(btVector3),tmpVertices.size(),0.f,0.f);
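	// conv now holds a convex hull (vertices plus per-face edge loops) recomputed from this shape's
	// vertex set; it is copied into m_polyhedron below and used to derive the face planes.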

	

	btAlignedObjectArray<btVector3> faceNormals;
	int numFaces = conv.faces.size();
	faceNormals.resize(numFaces);
	btConvexHullComputer* convexUtil = &conv;

	
	
	m_polyhedron->m_faces.resize(numFaces);
	int numVertices = convexUtil->vertices.size();
	m_polyhedron->m_vertices.resize(numVertices);
	for (int p=0;p<numVertices;p++)
	{
		m_polyhedron->m_vertices[p] = convexUtil->vertices[p];
	}

	for (int i=0;i<numFaces;i++)
	{
		int face = convexUtil->faces[i];
		//printf("face=%d\n",face);
		const btConvexHullComputer::Edge*  firstEdge = &convexUtil->edges[face];
		const btConvexHullComputer::Edge*  edge = firstEdge;

		btVector3 edges[3];
		int numEdges = 0;
		//compute face normals

		//btScalar maxCross2 = 0.f;
		//int chosenEdge = -1;

		do
		{
			
			int src = edge->getSourceVertex();
			m_polyhedron->m_faces[i].m_indices.push_back(src);
			int targ = edge->getTargetVertex();
			btVector3 wa = convexUtil->vertices[src];

			btVector3 wb = convexUtil->vertices[targ];
			btVector3 newEdge = wb-wa;
			newEdge.normalize();
			if (numEdges<2)
				edges[numEdges++] = newEdge;

			edge = edge->getNextEdgeOfFace();
		} while (edge!=firstEdge);

		btScalar planeEq = 1e30f;

		
		if (numEdges==2)
		{
			faceNormals[i] = edges[0].cross(edges[1]);
			faceNormals[i].normalize();
			m_polyhedron->m_faces[i].m_plane[0] = -faceNormals[i].getX();
			m_polyhedron->m_faces[i].m_plane[1] = -faceNormals[i].getY();
			m_polyhedron->m_faces[i].m_plane[2] = -faceNormals[i].getZ();
			m_polyhedron->m_faces[i].m_plane[3] = planeEq;

		}
		else
		{
			btAssert(0);//degenerate?
			faceNormals[i].setZero();
		}

		for (int v=0;v<m_polyhedron->m_faces[i].m_indices.size();v++)
		{
			btScalar eq = m_polyhedron->m_vertices[m_polyhedron->m_faces[i].m_indices[v]].dot(faceNormals[i]);
			if (planeEq>eq)
			{
				planeEq=eq;
			}
		}
		m_polyhedron->m_faces[i].m_plane[3] = planeEq;
	}


	if (m_polyhedron->m_faces.size() && conv.vertices.size())
	{
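		// For each face, compare the support point in the direction opposite the stored plane normal
		// against the plane constant; when the dot product falls below it, the plane coefficients are
		// negated and the vertex winding reversed, presumably to keep all face planes consistently
		// oriented with respect to the hull interior.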

		for (int f=0;f<m_polyhedron->m_faces.size();f++)
		{
			
			btVector3 planeNormal(m_polyhedron->m_faces[f].m_plane[0],m_polyhedron->m_faces[f].m_plane[1],m_polyhedron->m_faces[f].m_plane[2]);
			btScalar planeEq = m_polyhedron->m_faces[f].m_plane[3];

			btVector3 supVec = localGetSupportingVertex(-planeNormal);

			if (supVec.dot(planeNormal)<planeEq)
			{
				m_polyhedron->m_faces[f].m_plane[0] *= -1;
				m_polyhedron->m_faces[f].m_plane[1] *= -1;
				m_polyhedron->m_faces[f].m_plane[2] *= -1;
				m_polyhedron->m_faces[f].m_plane[3] *= -1;
				int numVerts = m_polyhedron->m_faces[f].m_indices.size();
				for (int v=0;v<numVerts/2;v++)
				{
					btSwap(m_polyhedron->m_faces[f].m_indices[v],m_polyhedron->m_faces[f].m_indices[numVerts-1-v]);
				}
			}
		}
	}

	

	m_polyhedron->initialize();

	return true;
}