Code Example #1
Ray* PlaneObject::transmitRay(Ray* r) {
	float u = 0, v = 0, w = 0;
	Vector3D intersect = getIntersectionPoint(r);
	getTextureCordsAtPoint(&(intersect), &u, &v, &w);
	// Outside the plane's extents? Pass the ray through unchanged.
	if((std::abs(u) > (width/2)) || (std::abs(v) > (height/2))) {
		Ray* re = new Ray();
		Vector3D pos = r->getPosition();
		re->setPosition(&pos);
		Vector3D dir = r->getDirection();
		re->setDirection(&dir);
		re->wavelength = r->wavelength;
		return re;
	}

	// Get our reflected ray
	Vector3D dir = r->getDirection();
	/*
	printf("plane strike at (%f, %f, %f)\n", intersect.x, intersect.y, intersect.z );
	printf("   Ray had orgin (%f, %f, %f) and direction <%f, %f, %f>.\n",
			r->getPosition().x,
			r->getPosition().y,
			r->getPosition().z,
			r->getDirection().x,
			r->getDirection().y,
			r->getDirection().z
	);
	*/
	// Direction from the focal point to the intersection point, normalized.
	Vector3D psp_norm = intersect-focal_point;
	psp_norm.normaliseSelf();
	return mMaterial->transmitRay(&intersect, &dir, normal, &psp_norm, u, v, w, r->wavelength);
}
Code Example #2
File: RayTracer.cpp Project: aouaki/TheBeatGoesOn
//Convenience overload: returns the index of the intersected object, or -1 if the ray hits nothing.
//On a hit, intersectionPoint and IntersPointNormal receive the intersection point and its normal;
//the extra output parameter of the full overload is discarded.
int RayTracer::getIntersectionPoint(const Vec3Df & camPos,
                                    const Vec3Df & dir,
                                    Vec3Df & intersectionPoint,
                                    Vec3Df & IntersPointNormal)
{
    float adress;   // receives the extra (occlusion) output of the full overload; discarded

    return getIntersectionPoint(camPos,
                                dir,
                                intersectionPoint,
                                IntersPointNormal,
                                adress);

}
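
A minimal usage sketch of this overload, written as if called from other RayTracer code. It assumes the return convention read off examples #5 and #6 (the intersected object's index, or -1 for no hit); camPos and dir stand in for whatever origin and direction the caller has:

// Hedged usage sketch, assuming the return convention described above.
Vec3Df hitPoint, hitNormal;
int hitObj = getIntersectionPoint(camPos, dir, hitPoint, hitNormal);
if (hitObj >= 0) {
    // hitPoint and hitNormal now describe where the ray meets object hitObj
}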
Code Example #3
File: ofApp.cpp Project: jbobrow/MonTagger
//--------------------------------------------------------------
void ofApp::draw(){
    
    // draw the incoming, the grayscale, the bg and the thresholded difference
    ofSetHexColor(0xffffff);
    colorImg.draw(displayPadding,displayPadding, displaySize.x, displaySize.y);
    grayImage.draw(displayPadding*2 + displaySize.x, displayPadding, displaySize.x, displaySize.y);
    grayBg.draw(displayPadding, displayPadding*2 + displaySize.y, displaySize.x, displaySize.y);
    grayDiff.draw(displayPadding*2 + displaySize.x,displayPadding*2 + displaySize.y, displaySize.x, displaySize.y);
    
    // then draw the contours:
    ofFill();
    ofSetHexColor(0x333333);
    ofRect(displayPadding*2 + displaySize.x, displayPadding*3 + 2*displaySize.y, displaySize.x, displaySize.y);
    ofSetHexColor(0xffffff);
    
    int totalPts = 0;
    
    // scale ratio
    float ratio = displaySize.x/videoSize.x;
    ofPoint startA, startB, endA, endB;
    ofPushMatrix();
    ofTranslate(displayPadding*2 + displaySize.x, displayPadding*3 + 2*displaySize.y);
    // draw each blob individually from the blobs vector,
    for (int i = 0; i < contourFinder.nBlobs; i++){
        
        // draw the points of the contour
        for(int j=0; j < contourFinder.blobs[i].nPts; j++){
            totalPts++;
            ofNoFill();
            ofSetHexColor(0xff00ff);
            ofCircle( ratio*contourFinder.blobs[i].pts[j].x, ratio*contourFinder.blobs[i].pts[j].y, 3);
        }
        
        // connect them by a line
        ofBeginShape();
        for(int j=0; j < contourFinder.blobs[i].nPts; j++){
            ofNoFill();
            ofSetHexColor(0xffff00);
            ofVertex( ratio*contourFinder.blobs[i].pts[j].x, ratio*contourFinder.blobs[i].pts[j].y);
        }
        ofEndShape();
        
        // set start and end points
        if(i == 0){
            getLineEndPoints(contourFinder.blobs[i], startA, endA);

            // draw the end points
            ofFill();
            ofSetHexColor(0xffffff);
            ofCircle( ratio*startA.x, ratio*startA.y, 3);
            ofCircle( ratio*endA.x, ratio*endA.y, 3);
        }
        else if(i == 1){
            getLineEndPoints(contourFinder.blobs[i], startB, endB);

            // draw the end points
            ofFill();
            ofSetHexColor(0xffffff);
            ofCircle( ratio*startB.x, ratio*startB.y, 3);
            ofCircle( ratio*endB.x, ratio*endB.y, 3);
        }
    }
    
    
    
    // draw a point at the intersection of the two lines
    ofPoint intersection;
//    ofLineSegmentIntersection(startA, endA, startB, endB, intersection);
    getIntersectionPoint(startA, endA, startB, endB, intersection);
    ofFill();
    ofSetHexColor(0xffaa00);
    ofCircle(ratio*intersection.x, ratio*intersection.y, 5);
    
    ofPopMatrix();

    // not finding an intersection because I am not yet elongating the line to the edges of the screen
    // TODO: use the direction of the unit vector from each line, find the y intercept, and draw a line
    // from the top to bottom and left to right
    
    // finally, a report:
    ofSetHexColor(0xffffff);
    stringstream reportStr;
    reportStr << "bg subtraction and blob detection" << endl
    << "press ' ' to capture bg" << endl
    << "threshold " << threshold << " (press: +/-)" << endl
    << "num points found " << totalPts << endl
    << "point A (" << startA.x << "," << startA.y << ") -> (" << endA.x << "," << endA.y << ")" << endl
    << "point B (" << startB.x << "," << startB.y << ") -> (" << endB.x << "," << endB.y << ")" << endl
    << "intersection @ (" << intersection.x << "," << intersection.y << ")" << endl
    << "num blobs found " << contourFinder.nBlobs << ", fps: " << ofGetFrameRate();
    ofDrawBitmapString(reportStr.str(), displayPadding, displayPadding*4 + 2*displaySize.y);

}
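
The TODO above notes that the two contour lines are still finite segments, so they often fail to cross on screen. One way to implement the suggested fix is to stretch each segment well past the screen bounds before intersecting; the helper below is a hypothetical sketch (not part of the project), using the ofPoint type from ofMain.h:

//--------------------------------------------------------------
// Hypothetical helper (not in the project): extend segment a-b far beyond the
// screen so two non-parallel contour lines are guaranteed to cross on screen.
void extendSegment(ofPoint &a, ofPoint &b, float screenW, float screenH){
    ofPoint dir = b - a;
    if (dir.length() < 1e-6f) return;   // degenerate segment, leave untouched
    dir.normalize();                    // unit vector along the line
    float reach = screenW + screenH;    // longer than any on-screen distance
    a -= dir * reach;                   // push both endpoints outward
    b += dir * reach;
}

Calling something like extendSegment(startA, endA, videoSize.x, videoSize.y) (and likewise for B) before the getIntersectionPoint call would let the existing intersection test succeed.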
Code Example #4
File: CollisionWorld.cpp Project: xinbot/6.172
void CollisionWorld::collisionSolver(Line *l1, Line *l2, IntersectionType
                                     intersectionType)
{
   // Despite our efforts to determine whether lines will intersect ahead of
   // time (and to modify their velocities appropriately), our simplified model
   // can sometimes cause lines to intersect.  In such a case, we compute
   // velocities so that the two lines can get unstuck in the fastest possible
   // way, while still conserving momentum and kinetic energy.
   if (intersectionType == ALREADY_INTERSECTED) {
      Vec p = getIntersectionPoint(l1->p1, l1->p2, l2->p1, l2->p2);

      if ((l1->p1 - p).length() < (l1->p2 - p).length()) {
         l1->vel = (l1->p2 - p).normalize() * l1->vel.length();
      } else {
         l1->vel = (l1->p1 - p).normalize() * l1->vel.length();
      }
      if ((l2->p1 - p).length() < (l2->p2 - p).length()) {
         l2->vel = (l2->p2 - p).normalize() * l2->vel.length();
      } else {
         l2->vel = (l2->p1 - p).normalize() * l2->vel.length();
      }
      return;
   }

   // Compute the collision face/normal vectors
   Vec face;
   Vec normal;
   if (intersectionType == L1_WITH_L2) {
      Vec v(*l2);
      face = v.normalize();
   } else {
      Vec v(*l1);
      face = v.normalize();
   }
   normal = face.orthogonal();

   // Obtain each line's velocity components with respect to the collision
   // face/normal vectors.
   double v1Face = l1->vel.dotProduct(face);
   double v2Face = l2->vel.dotProduct(face);
   double v1Normal = l1->vel.dotProduct(normal);
   double v2Normal = l2->vel.dotProduct(normal);

   // Compute the mass of each line (we simply use its length).
   double m1 = (l1->p1 - l1->p2).length();
   double m2 = (l2->p1 - l2->p2).length();

   // Perform the collision calculation (computes the new velocities along the
   // direction normal to the collision face such that momentum and kinetic
   // energy are conserved).
   double newV1Normal = ((m1 - m2) / (m1 + m2)) * v1Normal +
                        (2 * m2 / (m1 + m2)) * v2Normal;
   double newV2Normal = (2 * m1 / (m1 + m2)) * v1Normal +
                        ((m2 - m1) / (m2 + m1)) * v2Normal;

   // Combine the resulting velocities.
   l1->vel = normal * newV1Normal + face * v1Face;
   l2->vel = normal * newV2Normal + face * v2Face;

   return;
}
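
The normal-component update above is the closed-form solution of a one-dimensional elastic collision, with each line's length standing in for its mass. Writing v_{1,n}, v_{2,n} for the pre-collision velocity components along the collision normal, the conservation constraints and their solution are:

m_1 v_{1,n} + m_2 v_{2,n} = m_1 v'_{1,n} + m_2 v'_{2,n}
\tfrac{1}{2} m_1 v_{1,n}^2 + \tfrac{1}{2} m_2 v_{2,n}^2 = \tfrac{1}{2} m_1 {v'}_{1,n}^2 + \tfrac{1}{2} m_2 {v'}_{2,n}^2

v'_{1,n} = \frac{m_1 - m_2}{m_1 + m_2} v_{1,n} + \frac{2 m_2}{m_1 + m_2} v_{2,n}, \qquad
v'_{2,n} = \frac{2 m_1}{m_1 + m_2} v_{1,n} + \frac{m_2 - m_1}{m_1 + m_2} v_{2,n}

These are exactly newV1Normal and newV2Normal in the code; the tangential (face) components v1Face and v2Face are left unchanged.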
Code Example #5
File: RayTracer.cpp Project: aouaki/TheBeatGoesOn
QImage RayTracer::render (const Vec3Df & camPos,
                          const Vec3Df & direction,
                          const Vec3Df & upVector,
                          const Vec3Df & rightVector,
                          float fieldOfView,
                          float aspectRatio,
                          unsigned int screenWidth,
                          unsigned int screenHeight) {
    QImage image (QSize (screenWidth, screenHeight), QImage::Format_RGB888);
    Scene * scene = Scene::getInstance ();
    std::vector<Light> lights = scene->getLights();
    Light light = lights[0];

    QProgressDialog progressDialog ("Raytracing...", "Cancel", 0, 100);
    progressDialog.show ();

    //#pragma omp parallel for
    for (unsigned int i = 0; i < screenWidth; i++) {
        progressDialog.setValue ((100*i)/screenWidth);
        for (unsigned int j = 0; j < screenHeight; j++) {


            float tanX = tan (fieldOfView)*aspectRatio;
            float tanY = tan (fieldOfView);

            //Number of subdivisions per pixel dimension (antialiasing)
            int aliaNb = 2;
            if(!activeAA) aliaNb=1;
            aliaNb++;

            Vec3Df c (backgroundColor);
            Vec3Df tempc(0.,0.,0.);
            for(int pixi=1; pixi<aliaNb; pixi++){
                for(int pixj=1; pixj<aliaNb; pixj++){
                    Vec3Df stepX = (float (i)-0.5+float(pixi)/float(aliaNb) - screenWidth/2.f)/screenWidth * tanX * rightVector;
                    Vec3Df stepY = (float (j)-0.5+float(pixj)/float(aliaNb) - screenHeight/2.f)/screenHeight * tanY * upVector;
                    Vec3Df step = stepX + stepY;

                    Vec3Df dir = direction + step;
                    dir.normalize ();
                    Vec3Df intersectionPoint;
                    Vec3Df IntersPointNormal;
                    float occlusion;

                    int idObj = getIntersectionPoint(camPos,dir,intersectionPoint,IntersPointNormal,occlusion);

                    if(idObj>=0)
                    {

                        tempc += Brdf(camPos, IntersPointNormal, idObj,intersectionPoint,occlusion,0)/std::pow(aliaNb-1,2);
                        c=tempc;

                    }
                }
            }

            image.setPixel (i, j, qRgb (clamp (c[0], 0, 255), clamp (c[1], 0, 255), clamp (c[2], 0, 255)));
        }
    }
    progressDialog.setValue (100);
    return image;
}
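
For readability, the ray construction inside the double loop amounts to the following (a restatement of the code, with W x H the screen size, fov the fieldOfView parameter, and pixi, pixj the sub-pixel sample indices):

s_x = \frac{i - 0.5 + \text{pixi}/\text{aliaNb} - W/2}{W}\,\tan(\text{fov})\cdot\text{aspectRatio}, \qquad
s_y = \frac{j - 0.5 + \text{pixj}/\text{aliaNb} - H/2}{H}\,\tan(\text{fov})

\mathbf{d} = \operatorname{normalize}(\text{direction} + s_x\,\text{rightVector} + s_y\,\text{upVector})

With antialiasing enabled, (aliaNb-1)^2 = 4 such sub-pixel rays are traced per pixel and each hit's Brdf contribution is divided by (aliaNb-1)^2, so the stored color is the average over the sub-samples; pixels whose rays all miss keep backgroundColor.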
Code Example #6
File: RayTracer.cpp Project: aouaki/TheBeatGoesOn
Vec3Df RayTracer::Brdf(const Vec3Df & camPos,
                       const Vec3Df & normal,
                       int idObj,
                       const Vec3Df & intersectionPoint,
                       float occlusion,
                       int PTRays){

    Scene * scene = Scene::getInstance ();
    std::vector<Light> lights = scene->getLights();
    Object & object = scene->getObjects()[idObj];
    Vec3Df ci;
    for(unsigned int i =0;i<lights.size();i++)
    {
        Light light = lights[i];
        Vec3Df n = normal;
        n.normalize();
        Vec3Df wi = light.getPos() - intersectionPoint;
        wi.normalize();
        Vec3Df w0 = (camPos-intersectionPoint);
        w0.normalize();
        Vec3Df r = 2*(Vec3Df::dotProduct(wi,n))*n-wi;
        r.normalize();
        float diffuse = Vec3Df::dotProduct(wi, n);
        float shininess = 11;
        float spec = pow(std::max(Vec3Df::dotProduct(r,w0),0.f),shininess);
        diffuse = std::max(diffuse,0.0f);
        Vec3Df lightColor = light.getColor();
        Material material = object.getMaterial();
        float matDiffuse = material.getDiffuse();
        float matSpecular = material.getSpecular();
        Vec3Df matDiffuseColor = material.getColor();
        Vec3Df matSpecularColor = material.getColor();

        Vec3Df intersectionPoint2;
        Vec3Df IntersPointNormal2;

        //Area Lighting
        float radius=0.3f;

        //Mirror reflection
        if(scene->getObjects()[idObj].getRefl()>0 && activeMirror)
        {
            Vec3Df vDir= intersectionPoint-camPos;
            vDir.normalize();
            Vec3Df planA = Vec3Df::crossProduct(n, Vec3Df::crossProduct(vDir, n));
            Vec3Df newDir = Vec3Df::dotProduct(planA, vDir)*planA - Vec3Df::dotProduct(vDir, n)*n;
            float occ;
            int obj = getIntersectionPoint(intersectionPoint, newDir, intersectionPoint2, IntersPointNormal2, occ);
            if(obj>-1)
            {

                if(activeShadow)
                {
                    if(getIntersectionPoint(intersectionPoint,-intersectionPoint+light.getPos(),intersectionPoint2,IntersPointNormal2)==-1)
                    {
                        ci += Brdf(intersectionPoint,IntersPointNormal2,obj,intersectionPoint2,occ,0)*scene->getObjects()[idObj].getRefl();
                    }
                }
                else
                    ci += Brdf(intersectionPoint,IntersPointNormal2,obj,intersectionPoint2,occ,0)*scene->getObjects()[idObj].getRefl();
            }
        }

        //PathTracing
        if(activePT)
        {
            if(PTRays < depthPT)
            {
               for(int h=0; h< nbRayPT; h++)
                {
                    Vec3Df n1;
                    Vec3Df n2;

                    normal.getTwoOrthogonals(n1,n2);


                    float a = ((float)std::rand())/((float)RAND_MAX);
                    float b = ((float)std::rand())/((float)RAND_MAX)*2.-1.;
                    float c = ((float)std::rand())/((float)RAND_MAX)*2.-1.;

                    Vec3Df dir = normal*a+n1*b+n2*c;
                    dir.normalize();

                    int objPT = getIntersectionPoint(intersectionPoint,dir,intersectionPoint2,IntersPointNormal2);

                    ci+=Brdf(intersectionPoint,IntersPointNormal2,objPT,intersectionPoint2,0.,PTRays+1)/(nbRayPT*depthPT);
                }


            }



            //if pt < pt_max
            //cast many rays
            //contribution += brdf(pt+1)
        }

        if(scene->getObjects()[idObj].getRefl()<1.0 || true)
        {
            if(nbRayShadow>0 && activeShadow)
                for(int p = 0;p<nbRayShadow;p++)
                {
                    float a = ((float)std::rand())/((float)RAND_MAX)*2.-1.;
                    float b = ((float)std::rand())/((float)RAND_MAX)*2.-1.;
                    float c = ((float)std::rand())/((float)RAND_MAX)*2.-1.;

                    float sum = a+b+c;
                    a=a/sum*radius;
                    b=b/sum*radius;
                    c=c/sum*radius;
                    Vec3Df lightposbis;

                    lightposbis[0]=light.getPos()[0]+a;
                    lightposbis[1]=light.getPos()[1]+b;
                    lightposbis[2]=light.getPos()[2]+c;

                    if(getIntersectionPoint(intersectionPoint,-intersectionPoint+lightposbis,intersectionPoint2,IntersPointNormal2)==-1)
                    {
                        ci += (((matDiffuse * diffuse * matDiffuseColor) +( matSpecular * spec * matSpecularColor*0.5))*lightColor)*255/nbRayShadow;
                    }
                }
            else if(activeShadow)
            {
                if(getIntersectionPoint(intersectionPoint,-intersectionPoint+light.getPos(),intersectionPoint2,IntersPointNormal2)==-1)
                {
                    ci += (((matDiffuse * diffuse * matDiffuseColor) +( matSpecular * spec * matSpecularColor*0.5))*lightColor)*255;
                }
            }
            else //no shadows
            {
                ci += (((matDiffuse * diffuse * matDiffuseColor) +( matSpecular * spec * matSpecularColor*0.5))*lightColor)*255;
            }
            //ci += (((matDiffuse * diffuse * matDiffuseColor) +( matSpecular * spec * matSpecularColor*0.5))*lightColor)*255/nbrayshadow;
        }



    }

    if(activeAO) return ci*(1.f-occlusion);
    else return ci;
}
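
In summary, the per-light term assembled above is a Phong-style model. In the code's notation (n the surface normal, wi the unit direction to the light, w0 the unit direction to the camera, r the reflection of wi about n):

\mathbf{r} = 2(\mathbf{w_i}\cdot\mathbf{n})\,\mathbf{n} - \mathbf{w_i}

ci \mathrel{+}= \bigl( k_d\,\max(\mathbf{w_i}\cdot\mathbf{n}, 0)\,C + 0.5\,k_s\,\max(\mathbf{r}\cdot\mathbf{w_0}, 0)^{11}\,C \bigr)\cdot\text{lightColor}\cdot 255

where k_d = material.getDiffuse(), k_s = material.getSpecular(), and C = material.getColor() (used for both the diffuse and specular colors); the factor 255, the 0.5 on the specular term, and the hard-coded shininess of 11 come directly from the code. Shadow tests (a single ray, or nbRayShadow jittered rays toward an area light of radius 0.3) decide whether this term is added, the mirror and path-tracing branches accumulate additional recursive Brdf calls into ci, and activeAO finally scales the result by (1 - occlusion).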