Example #1
void run(){

  // block until libfreenect2 delivers a new frame set
  listener->waitForNewFrame(*frames);
  libfreenect2::Frame *rgb = (*frames)[libfreenect2::Frame::Color];
  libfreenect2::Frame *ir = (*frames)[libfreenect2::Frame::Ir];
  libfreenect2::Frame *depth = (*frames)[libfreenect2::Frame::Depth];


  // static: allocated once and reused across calls
  static Img8u colorImage(Size(rgb->width,rgb->height),3);

  if(depth){
    Img32f depthImage(Size(depth->width,depth->height),formatMatrix,
                      std::vector<float*>(1, (float*)depth->data));
    gui["hdepth"] = &depthImage;
  }else{
    throw ICLException("error detected in libfreenect2.so: please make sure"
                       " visualization is deactivated in ....h by setting debug_on to false");
  }
  
  Img32f irImage(Size(ir->width,ir->height),formatMatrix, 
                 std::vector<float*>(1,(float*)ir->data));
  
  interleavedToPlanar(rgb->data, &colorImage);
  colorImage.swapChannels(0,2);
  
  gui["hcolor"] = &colorImage;
  gui["hir"] = &irImage;
  
  listener->release(*frames);
}
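
The interleavedToPlanar call above is ICL's; as a point of reference, a minimal hand-rolled sketch of the same interleaved-to-planar copy could look like the following (assuming a tightly packed 8-bit interleaved source; the swapChannels(0,2) afterwards is needed because libfreenect2 delivers color in BGRX order). This is an illustration, not the library's implementation:

// Hedged sketch of an interleaved-to-planar copy; 'channels' would be 3 for
// an RGB target. Planes are laid out one after another in dst.
void interleavedToPlanarSketch(const unsigned char* src, unsigned char* dst,
                               int width, int height, int channels)
{
  const int plane = width * height;
  for (int p = 0; p < plane; ++p)
    for (int c = 0; c < channels; ++c)
      dst[c * plane + p] = src[p * channels + c];
}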
Example #2
int main(int argc, char **argv)
{
	QApplication app(argc, argv);

	QKinectGrabberV1 k;
	k.start();

	QImageWidget colorWidget;
	//colorWidget.setMinimumSize(720, 480);
	colorWidget.setMinimumSize(640, 480);
	colorWidget.show();
	QApplication::connect(&k, SIGNAL(colorImage(QImage)), &colorWidget, SLOT(setImage(QImage)));

	QImageWidget depthWidget;
	//depthWidget.setMinimumSize(512, 424);
	depthWidget.setMinimumSize(640, 480);
	depthWidget.show();
	QApplication::connect(&k, SIGNAL(depthImage(QImage)), &depthWidget, SLOT(setImage(QImage)));

	//QImageWidget infraredWidget;
	//infraredWidget.setMinimumSize(512, 424);
	//infraredWidget.show();
	//QApplication::connect(&k, SIGNAL(infraredImage(QImage)), &infraredWidget, SLOT(setImage(QImage)));

	int app_exit = app.exec();
	k.stop();
	return app_exit;

}
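
The string-based SIGNAL/SLOT macros above are the Qt 4 style; Qt 5 accepts the same connections in pointer-to-member form, which is checked at compile time. A minimal sketch, assuming the signal and slot signatures shown above:

// Qt 5 pointer-to-member connect syntax for the same signal/slot pairs.
QObject::connect(&k, &QKinectGrabberV1::colorImage, &colorWidget, &QImageWidget::setImage);
QObject::connect(&k, &QKinectGrabberV1::depthImage, &depthWidget, &QImageWidget::setImage);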
Example #3
PointBufferPtr KinectIO::getBuffer()
{
	// Get depth image from sensor
	// Get depth image from sensor (the loops below assume 480 x 640)
	std::vector<short> depthImage(480 * 640, 0);
	m_grabber->getDepthImage(depthImage);

	std::vector<uint8_t> colorImage(480 * 640 * 3, 0);
	m_grabber->getColorImage(colorImage);

	// Return null pointer if no image was grabbed
	if(depthImage.empty()) return PointBufferPtr();

	// Collect indices of invalid depth readings. The buffer holds integral
	// values, so isnan() could never fire; the sensor marks invalid pixels as 0.
	std::set<int> nans;
	for(size_t i = 0; i < depthImage.size(); i++)
	{
		if(depthImage[i] == 0) nans.insert(i);
	}

	size_t numPoints = depthImage.size() - nans.size();

	// Convert depth image into point cloud
	PointBufferPtr buffer(new PointBuffer);
	floatArr points(new float[numPoints * 3]);
	ucharArr colors(new uchar[numPoints * 3]);

	int index = 0;
	int c = 0;
	for (int i = 0; i < 480; i++) {
		for (int j = 0; j < 640; j++) {

			if(nans.find(c) == nans.end())
			{
				Eigen::Vector4f v;
				v << j, i, (float)(depthImage[i * 640 + j]), 1.0f;
				v = m_depthMatrix.transpose() * v;

				points[3 * index    ] = v(0) / v(3);
				points[3 * index + 1] = v(1) / v(3);
				points[3 * index + 2] = v(2) / v(3);

				colors[3 * index    ] = colorImage[3 * c    ];
				colors[3 * index + 1] = colorImage[3 * c + 1];
				colors[3 * index + 2] = colorImage[3 * c + 2];
				index++;
			}
			c++;
		}
	}

	buffer->setPointArray(points, numPoints);
	buffer->setPointColorArray(colors, numPoints);
	return buffer;
}
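
The multiplication by the 4x4 m_depthMatrix above is one way to back-project a pixel into 3D. For comparison, here is a minimal sketch of the classic pinhole back-projection; fx, fy, cx, cy are hypothetical camera intrinsics, not values taken from KinectIO:

// Pinhole back-projection sketch: pixel (u,v) with depth d to camera space.
struct Point3f { float x, y, z; };
Point3f backProject(int u, int v, float d, float fx, float fy, float cx, float cy)
{
	return { (u - cx) * d / fx, (v - cy) * d / fy, d };
}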
Example #4
// Create depth image from raw measurements for visualization
cv::Mat depthBasedSegmentation::renderDepthMap(cv::Mat depthMap){
	cv::Mat depthImage(depthMap.size(), CV_8UC1);
	double minVal, maxVal;
	cv::Point minLoc, maxLoc;
	cv::minMaxLoc(depthMap, &minVal, &maxVal, &minLoc, &maxLoc);

	// Rescale valid pixels linearly: maxVal maps to 0 and minVal to 255,
	// so smaller (nearer) measurements render brighter; invalid pixels are 0.
	for(int i = 0; i < depthImage.rows; i++)
		for(int j = 0; j < depthImage.cols; j++){
			if(validityMask.at<unsigned char>(i,j) != 0) {
				depthImage.at<unsigned char>(i,j) = (unsigned char)
					(255*(depthMap.at<short>(i,j)-maxVal)/(minVal-maxVal));
			} else {
				depthImage.at<unsigned char>(i,j) = 0;
			}
		}

	return depthImage;
}
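
OpenCV can do an equivalent masked min-max rescale in a single call; note the polarity differs from the loop above (cv::normalize maps the minimum to 0 and the maximum to 255, whereas renderDepthMap maps minVal to 255). A minimal sketch:

// One-call masked normalization to 8 bits; unmasked pixels stay zero.
cv::Mat vis = cv::Mat::zeros(depthMap.size(), CV_8UC1);
cv::normalize(depthMap, vis, 0, 255, cv::NORM_MINMAX, CV_8UC1, validityMask);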
Example #5
void runCuda(){

  // Map OpenGL buffer object for writing from CUDA on a single GPU
  // No data is moved (Win & Linux). When mapped to CUDA, OpenGL should not use this buffer
  
  if(iterations<renderCam->iterations){
    uchar4 *dptr=NULL;
    iterations++;
    cudaGLMapBufferObject((void**)&dptr, pbo);
  
    //pack geom and material arrays for the kernel
    geom* geoms = new geom[renderScene->objects.size()];
    material* materials = new material[renderScene->materials.size()];
    map* maps = new map[renderScene->maps.size()];

    for(size_t i=0; i<renderScene->objects.size(); i++){
      geoms[i] = renderScene->objects[i];
    }
    for(size_t i=0; i<renderScene->materials.size(); i++){
      materials[i] = renderScene->materials[i];
    }
    for(size_t i=0; i<renderScene->maps.size(); i++){
      maps[i] = renderScene->maps[i];
    }

    // execute the kernel
    if(!textureMode)
      cudaRaytraceCore(dptr, renderCam, targetFrame, iterations, materials, renderScene->materials.size(), maps, renderScene->maps.size(), geoms, renderScene->objects.size(), mblur, dof);
    else
      cudaRaytraceCoreT(dptr, renderCam, targetFrame, iterations, materials, renderScene->materials.size(), maps, renderScene->maps.size(), geoms, renderScene->objects.size(), mblur, dof);

    // free the per-frame host copies (previously leaked on every frame)
    delete[] geoms;
    delete[] materials;
    delete[] maps;

    // unmap buffer object
    cudaGLUnmapBufferObject(pbo);
  }else{

    if(!finishedRender){
      //output image file
      image outputImage(renderCam->resolution.x, renderCam->resolution.y);
	  image depthImage(renderCam->resolution.x, renderCam->resolution.y);
      for(int x=0; x<renderCam->resolution.x; x++){
        for(int y=0; y<renderCam->resolution.y; y++){
          int index = x + (y * renderCam->resolution.x);
          glm::vec3 justRGB(renderCam->image[index].x, renderCam->image[index].y, renderCam->image[index].z);
          outputImage.writePixelRGB(renderCam->resolution.x-1-x, y, justRGB);
          // the w component carries the accumulated depth term for this pixel
          float d = fabs(renderCam->image[index].w - renderCam->positions[targetFrame].z)/40.0f;
          depthImage.writePixelRGB(renderCam->resolution.x-1-x, y, glm::vec3(d,d,d));
        }
      }
      
      gammaSettings gamma;
      gamma.applyGamma = true;
      gamma.gamma = 1.0/2.2;
      gamma.divisor = renderCam->iterations;
      outputImage.setGammaSettings(gamma);
      string filename = renderCam->imageName;
      string s;
      stringstream out;
      out << targetFrame;
      s = out.str();
      utilityCore::replaceString(filename, ".bmp", "."+s+".bmp");
      utilityCore::replaceString(filename, ".png", "."+s+".png");
      outputImage.saveImageRGB(filename);
	  depthImage.saveImageRGB("depth."+s+".bmp");
      cout << "Saved frame " << s << " to " << filename << endl;
      finishedRender = true;
      if(singleFrameMode==true){
        //cudaDeviceReset(); 
        exit(0);
      }
    }
    if(targetFrame<renderCam->frames-1){

      //clear image buffer and move onto next frame
      targetFrame++;
      iterations = 0;
      for(int i=0; i<renderCam->resolution.x*renderCam->resolution.y; i++){
        renderCam->image[i] = glm::vec4(0,0,0,-1);
      }
      //cudaDeviceReset(); 
      finishedRender = false;
    }
  }
  
}
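
cudaGLMapBufferObject and cudaGLUnmapBufferObject, used above, are the legacy CUDA/OpenGL interop calls; newer CUDA releases deprecate them in favor of the cudaGraphics* resource API. A minimal sketch of the modern mapping path (pboResource is a hypothetical handle, registered once after the PBO is created):

#include <cuda_gl_interop.h>

cudaGraphicsResource* pboResource = nullptr;  // registered once at startup

void mapPboAndRender(unsigned int pbo)
{
  if (!pboResource)
    cudaGraphicsGLRegisterBuffer(&pboResource, pbo, cudaGraphicsMapFlagsWriteDiscard);

  uchar4* dptr = nullptr;
  size_t numBytes = 0;
  cudaGraphicsMapResources(1, &pboResource, 0);
  cudaGraphicsResourceGetMappedPointer((void**)&dptr, &numBytes, pboResource);
  // ... launch the raytrace kernel writing into dptr ...
  cudaGraphicsUnmapResources(1, &pboResource, 0);
}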
Example #6
void FramebufferGLTest::read() {
    #ifndef MAGNUM_TARGET_GLES
    if(!Context::current()->isExtensionSupported<Extensions::GL::ARB::framebuffer_object>())
        CORRADE_SKIP(Extensions::GL::ARB::framebuffer_object::string() + std::string(" is not available."));
    #endif

    Renderbuffer color;
    #ifndef MAGNUM_TARGET_GLES2
    color.setStorage(RenderbufferFormat::RGBA8, Vector2i(128));
    #else
    color.setStorage(RenderbufferFormat::RGBA4, Vector2i(128));
    #endif

    /* Separate depth and stencil renderbuffers are not supported (at least
       not on my NVidia), thus we need to do this juggling with one renderbuffer */
    Renderbuffer depthStencil;
    #ifdef MAGNUM_TARGET_GLES2
    if(Context::current()->isExtensionSupported<Extensions::GL::OES::packed_depth_stencil>())
    #endif
    {
        #ifdef MAGNUM_TARGET_GLES2
        Debug() << "Using" << Extensions::GL::OES::packed_depth_stencil::string();
        #endif
        depthStencil.setStorage(RenderbufferFormat::Depth24Stencil8, Vector2i(128));
    }
    #ifdef MAGNUM_TARGET_GLES2
    else depthStencil.setStorage(RenderbufferFormat::DepthComponent16, Vector2i(128));
    #endif

    Framebuffer framebuffer({{}, Vector2i(128)});
    framebuffer.attachRenderbuffer(Framebuffer::ColorAttachment(0), color)
               .attachRenderbuffer(Framebuffer::BufferAttachment::Depth, depthStencil);

    #ifdef MAGNUM_TARGET_GLES2
    if(Context::current()->isExtensionSupported<Extensions::GL::OES::packed_depth_stencil>())
    #endif
    {
        framebuffer.attachRenderbuffer(Framebuffer::BufferAttachment::Stencil, depthStencil);
    }

    MAGNUM_VERIFY_NO_ERROR();
    CORRADE_COMPARE(framebuffer.checkStatus(FramebufferTarget::ReadDraw), Framebuffer::Status::Complete);

    Renderer::setClearColor(Math::normalize<Color4>(Color4ub(128, 64, 32, 17)));
    Renderer::setClearDepth(Math::normalize<Float, UnsignedShort>(48352));
    Renderer::setClearStencil(67);
    framebuffer.clear(FramebufferClear::Color|FramebufferClear::Depth|FramebufferClear::Stencil);

    Image2D colorImage(ColorFormat::RGBA, ColorType::UnsignedByte);
    framebuffer.read({16, 8}, {8, 16}, colorImage);
    CORRADE_COMPARE(colorImage.size(), Vector2i(8, 16));

    MAGNUM_VERIFY_NO_ERROR();
    CORRADE_COMPARE(colorImage.data<Color4ub>()[0], Color4ub(128, 64, 32, 17));

    #ifdef MAGNUM_TARGET_GLES
    if(Context::current()->isExtensionSupported<Extensions::GL::NV::read_depth>())
    #endif
    {
        #ifdef MAGNUM_TARGET_GLES
        Debug() << "Using" << Extensions::GL::NV::read_depth::string();
        #endif

        Image2D depthImage(ColorFormat::DepthComponent, ColorType::UnsignedShort);
        framebuffer.read({}, Vector2i(1), depthImage);

        MAGNUM_VERIFY_NO_ERROR();
        CORRADE_COMPARE(depthImage.data<UnsignedShort>()[0], 48352);
    }

    #ifdef MAGNUM_TARGET_GLES
    if(Context::current()->isExtensionSupported<Extensions::GL::NV::read_stencil>())
    #endif
    {
        #ifdef MAGNUM_TARGET_GLES
        Debug() << "Using" << Extensions::GL::NV::read_stencil::string();
        #endif

        Image2D stencilImage(ColorFormat::StencilIndex, ColorType::UnsignedByte);
        framebuffer.read({}, Vector2i(1), stencilImage);

        MAGNUM_VERIFY_NO_ERROR();
        CORRADE_COMPARE(stencilImage.data<UnsignedByte>()[0], 67);
    }

    #ifdef MAGNUM_TARGET_GLES
    if(Context::current()->isExtensionSupported<Extensions::GL::NV::read_depth_stencil>())
    #endif
    {
        #ifdef MAGNUM_TARGET_GLES
        Debug() << "Using" << Extensions::GL::NV::read_depth_stencil::string();
        #endif

        Image2D depthStencilImage(ColorFormat::DepthStencil, ColorType::UnsignedInt248);
        framebuffer.read({}, Vector2i(1), depthStencilImage);

        MAGNUM_VERIFY_NO_ERROR();
        /** @todo This will probably fail on different systems */
        CORRADE_COMPARE(depthStencilImage.data<UnsignedInt>()[0] >> 8, 12378300);
        CORRADE_COMPARE(depthStencilImage.data<UnsignedByte>()[0], 67);
    }
}
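
Under the Magnum wrappers, such a read boils down to a plain glReadPixels from the bound read framebuffer. A raw-GL sketch of the color read above ('fbo' is a hypothetical framebuffer handle, not Magnum API):

// Read one RGBA8 pixel back from the framebuffer at (16, 8).
GLubyte pixel[4];
glBindFramebuffer(GL_READ_FRAMEBUFFER, fbo);
glReadPixels(16, 8, 1, 1, GL_RGBA, GL_UNSIGNED_BYTE, pixel);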
Example #7
// get depth image of hand and depth value
void Kinect::KinectZoom::calcHandDepthFrame( cv::Mat frame, openni::VideoStream* m_depth, float x, float y, float z, bool mainHand )
{
	// convert coordinates of hand
	openni::CoordinateConverter coordinateConverter;
	float x1;
	float y1;
	float x2;
	float y2;
	float z1;
	coordinateConverter.convertWorldToDepth( *m_depth, x-150.0f, y-150.0f, z, &x1, &y1, &z1 );
	coordinateConverter.convertWorldToDepth( *m_depth, x+200.0f, y+200.0f, z, &x2, &y2, &z1 );

	// store current and previous depth for main hand only
	if ( mainHand ) {
		previousZ = currentZ;
		currentZ = z1;
	}

	openni::VideoFrameRef depthFrame;
	m_depth->readFrame( &depthFrame );
	//PK mod:
	// original:
	//openni::DepthPixel* depthPixels = ( openni::DepthPixel* )depthFrame.getData();
	// new:
	openni::DepthPixel* depthPixels = const_cast<openni::DepthPixel*>( reinterpret_cast<const openni::DepthPixel*>( depthFrame.getData() ) );
	//PK end
	cv::Mat depthImage( depthFrame.getHeight(), depthFrame.getWidth(), CV_16UC1, depthPixels );

	cv::Rect rect;
	rect.x = static_cast<int>( x1 );
	rect.y = frame.rows - static_cast<int>( y1 );
	rect.width = abs( static_cast<int>( x1-x2 ) );
	rect.height = abs( static_cast<int>( y1-y2 ) );

	// clamp the ROI to the frame bounds (width/height are already
	// non-negative after abs(), so those two checks are defensive only)
	if ( rect.x<0 ) {
		rect.x=0;
	}
	if ( rect.y<0 ) {
		rect.y=0;
	}
	if ( rect.width<0 ) {
		rect.width=0;
	}
	if ( rect.height<0 ) {
		rect.height=0;
	}
	if ( ( rect.x+rect.width )>frame.cols-1 ) {
		rect.width =frame.cols-1-rect.x;
	}
	if ( ( rect.y+rect.height )>frame.rows-1 ) {
		rect.height = frame.rows-1-rect.y;
	}

	depthImage = depthImage( rect );
	double minVal;
	double maxVal;
	cv::Point minLoc;
	cv::Point maxLoc;

	minMaxLoc( depthImage, &minVal, &maxVal, &minLoc, &maxLoc );

	// normalize the cropped depth to 8 bits for the flood fill below
	cv::Mat depthImage2;
	depthImage.convertTo( depthImage2, CV_8UC1, 255/maxVal );

	// floodfill segmentation of hand from depth map
	mask = cv::Mat::zeros( depthImage2.rows + 2, depthImage2.cols + 2, CV_8U );
	cv::floodFill( depthImage2, mask, cv::Point( depthImage2.cols/2,depthImage2.rows/2 ),
				   255, 0, cv::Scalar( 4 ),
				   cv::Scalar( 4 ),  4 + ( 255 << 8 ) + cv::FLOODFILL_MASK_ONLY + cv::FLOODFILL_FIXED_RANGE );
#ifdef QT_DEBUG
	// show images of segmented hand in debug mode only
	//cv::namedWindow( "floodfill", CV_WINDOW_AUTOSIZE );
	//cv::imshow( "floodfill", mask );
	//cv::waitKey( 33 );
#endif
}
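
The packed flags argument to cv::floodFill above is worth decoding; the same value restated with its fields named:

// Flood-fill flags decoded: the low byte is the connectivity, bits 8-15 the
// value written into the mask, plus the two behavior flags.
int flags = 4                            // 4-connected neighborhood
          | (255 << 8)                   // fill value written into the mask
          | cv::FLOODFILL_MASK_ONLY      // modify only the mask, not the image
          | cv::FLOODFILL_FIXED_RANGE;   // compare pixels against the seed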
Example #8
int main( int argc, char **argv ){
	string pathStr;
	gProgramName = argv[0];
    
	parseCommandLine( argc, argv );
	argc -= optind;
	argv += optind;
	if( gTheScene->hasInputSceneFilePath( ) &&
       gTheScene->hasOutputFilePath( ) &&
       gTheScene->hasDepthFilePath( ) ){
		gTheScene->parse( );
		cout << *gTheScene << endl;
	}else{
		usage( "You specify an input scene file, an output file and a depth file." );
        exit(1);
	}
    //color image with lighting
    Image image((int)round(gTheScene->viewPlane().width/gTheScene->viewPlane().pixelsize),
                (int)round(gTheScene->viewPlane().height/gTheScene->viewPlane().pixelsize));
    //depth image with gray scale and no lighting
    Image depthImage((int)round(gTheScene->viewPlane().width/gTheScene->viewPlane().pixelsize),
                     (int)round(gTheScene->viewPlane().height/gTheScene->viewPlane().pixelsize));
    
    int depthColor;
    
    //create a vector to store all of the rays
    std::vector<Ray*> listOfRays;
    
    //fill the list of rays depending on the pixelSize, Height, and Width of the ViewPlane
    rayFactory(listOfRays, gTheScene->camera(), gTheScene->viewPlane());
    
    //create a vector to store all of the hits
    std::vector<Hit*> listOfHits;
    
    //fill it with hits equal to the number of rays
    for (int j = 0; j < listOfRays.size(); ++j) {
        listOfHits.push_back(new Hit());
    }
    
    //Check for intersections with every object in our list of objects
    for (int r = 0; r < listOfRays.size(); ++r) {
        for (int obj = 0; obj < gTheScene->group().size(); ++obj) {
            if(gTheScene->group()[obj]->Intersect(*listOfRays[r], *listOfHits[r])){
                listOfHits[r]->objectNumber = obj;
            }
        }
        
    }
    
    
    //find the range of t values across all hits
    double tMax = listOfHits[0]->t;
    double tMin = listOfHits[0]->t;
    for (int x = 0; x < listOfHits.size(); ++x) {
        if (tMax < listOfHits[x]->t) {
            tMax = listOfHits[x]->t;
        }
        if (tMin > listOfHits[x]->t) {
            tMin = listOfHits[x]->t;
        }
    }
    //For each hit, normalize t by the range of t values and calculate the
    //light being reflected (the division preserves the sign of t, so the
    //intersection test below still works)
    for (int h = 0; h < listOfHits.size(); ++h) {
        listOfHits[h]->t /= (tMax-tMin);
        
        //if there is an intersection set the pixel color accordingly
        if (listOfHits[h]->t > 0.0) {
            depthColor = depthClamp(listOfHits[h]->t);
            Vec3 light = (gTheScene->camera().center - listOfHits[h]->hit);
            light.normalize();
            image.pixels[h] = gTheScene->Shader(*gTheScene->group()[listOfHits[h]->objectNumber],
                                                light,
                                                gTheScene->group()[listOfHits[h]->objectNumber]->normal(*listOfHits[h]));
            depthImage.pixels[h] = Pixel(depthColor,depthColor,depthColor);
        }
        else {
            //otherwise set the color to our background color
            image.pixels[h] = gTheScene->backgroundColor();
            depthImage.pixels[h] = Pixel(0.4, 0.8, 0.4);
        }
    }
    
    image.write(gTheScene->outputFile().c_str());
    depthImage.write(gTheScene->depthFile().c_str());
	return( 0 );
}
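
The body of depthClamp is not shown in this example; judging only from its call site (a normalized t in, an 8-bit gray component out), it plausibly has a shape like the sketch below. This is a guess at the implied contract, not the original helper:

// Hypothetical reconstruction of depthClamp: clamp t to [0,1] and map it to
// an 8-bit gray value (the mapping direction here is assumed, not known).
int depthClampSketch(double t)
{
    if (t < 0.0) t = 0.0;
    if (t > 1.0) t = 1.0;
    return (int)(255.0 * (1.0 - t));   // nearer hits drawn brighter (assumed)
}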
Example #9
void Raytracer::render(const char *filename, const char *depth_filename,
                       Scene const &scene)
{
    // Allocate the two images that will ultimately be saved.
    Image colorImage(scene.resolution[0], scene.resolution[1]);
    Image depthImage(scene.resolution[0], scene.resolution[1]);
    
    // Create the zBuffer.
    double *zBuffer = new double[scene.resolution[0] * scene.resolution[1]];
    for(int i = 0; i < scene.resolution[0] * scene.resolution[1]; i++) {
        zBuffer[i] = DBL_MAX;
    }
    
    // @@@@@@ YOUR CODE HERE
    // calculate camera parameters for rays, refer to the slides for details
    //!!! USEFUL NOTES: tan() takes rad rather than degree, use deg2rad() to transform
    //!!! USEFUL NOTES: view plane can be anywhere, but it will be implemented differently,
    //you can find references from the course slides 22_GlobalIllum.pdf
    
    // place the view plane at the near clipping distance
    double distance = scene.camera.zNear;
    Vector eyePoint = scene.camera.position;
    Vector lookatPoint = scene.camera.center;
    // view direction
    Vector w = (lookatPoint - eyePoint).normalized();
    Vector up = scene.camera.up;
    Vector u = w.cross(up).normalized();
    Vector v = u.cross(w).normalized();
    
    
    double rad = deg2rad(scene.camera.fovy);
    double viewPlaneHalfHeight=tan(rad/2)*distance;
    
    double viewPlaneHalfWidth = scene.camera.aspect*viewPlaneHalfHeight;
    
    Vector viewPlaneBottomLeftPoint = eyePoint+ w*distance- v*viewPlaneHalfHeight - u*viewPlaneHalfWidth;
    
    
    
    // Iterate over all the pixels in the image.
    for(int y = 0; y < scene.resolution[1]; y++) {
        for(int x = 0; x < scene.resolution[0]; x++) {
            
            // Generate the appropriate ray for this pixel
            Ray ray;
            if (scene.objects.empty())
            {
                //if there are no objects in the scene, render the default scene:
                //we assume the view plane is at z = 640 with width and height both 640
                ray = Ray(scene.camera.position, (Vector(-320, -320, 640) + Vector(x + 0.5, y + 0.5, 0) - scene.camera.position).normalized());
            }
            else
            {
                // @@@@@@ YOUR CODE HERE
                // set primary ray using the camera parameters
                //!!! USEFUL NOTES: all world coordinate rays need to have a normalized direction
                
                Vector xIncVector = (u*2*viewPlaneHalfWidth)/scene.resolution[0];
                Vector yIncVector = (v*2*viewPlaneHalfHeight)/scene.resolution[1];
                Vector viewPlanePoint = viewPlaneBottomLeftPoint + x*xIncVector + y*yIncVector;
                Vector rayDirection = viewPlanePoint - eyePoint;
                ray = Ray(eyePoint, rayDirection.normalized());
            }
            
            // Initialize recursive ray depth.
            int rayDepth = 0;
            
            // Our recursive raytrace will compute the color and the z-depth
            Vector color;
            
            // This should be the maximum depth, corresponding to the far plane.
            // NOTE: This assumes the ray direction is unit-length and the
            // ray origin is at the camera position.
            double depth = scene.camera.zFar;
            
            // Calculate the pixel value by shooting the ray into the scene
            trace(ray, rayDepth, scene, color, depth);
            
            // Depth test
            if(depth >= scene.camera.zNear && depth <= scene.camera.zFar &&
               depth < zBuffer[x + y*scene.resolution[0]]) {
                zBuffer[x + y*scene.resolution[0]] = depth;
                
                // Set the image color (and depth)
                colorImage.setPixel(x, y, color);
                depthImage.setPixel(x, y, (depth-scene.camera.zNear) /
                                    (scene.camera.zFar-scene.camera.zNear));
            }
        }
        
        //output step information
        if (y % 100 == 0)
        {
            printf("Row %d pixels finished.\n", y);
        }
    }
    
    //save image
    colorImage.writeBMP(filename);
    depthImage.writeBMP(depth_filename);
    
    printf("Ray tracing finished with images saved.\n");
    
    delete[] zBuffer;
}
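
The depth value written to depthImage above is the standard linear remap of eye-space depth into [0, 1]; pulled out as a helper it is simply:

// Linear depth normalization used above: zNear maps to 0, zFar maps to 1.
inline double normalizeDepth(double depth, double zNear, double zFar)
{
    return (depth - zNear) / (zFar - zNear);
}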
Example #10
void Raytracer::render(const char *filename, const char *depth_filename,
                       Scene const &scene)
{
    // Allocate the two images that will ultimately be saved.
    Image colorImage(scene.resolution[0], scene.resolution[1]);
    Image depthImage(scene.resolution[0], scene.resolution[1]);
    
    // Create the zBuffer.
    double *zBuffer = new double[scene.resolution[0] * scene.resolution[1]];
    for(int i = 0; i < scene.resolution[0] * scene.resolution[1]; i++) {
        zBuffer[i] = DBL_MAX;
    }

	// @@@@@@ YOUR CODE HERE
	// calculate camera parameters for rays, refer to the slides for details
	//!!! USEFUL NOTES: tan() takes rad rather than degree, use deg2rad() to transform
	//!!! USEFUL NOTES: view plane can be anywhere, but it will be implemented differently,
	//you can find references from the course slides 22_GlobalIllum.pdf
    
    
	Vector cameraPos = scene.camera.position;
	Vector cameraCenter = scene.camera.center;
	
	Vector cameraPosR = scene.camera.position;
	Vector cameraCenterR = scene.camera.center;
	
	// viewing direction: scene center minus camera position
	Vector wVecOriginal = scene.camera.center - cameraPos;
	wVecOriginal.normalize();
	// up vector (u)
	Vector uVec = scene.camera.up;
	uVec.normalize();
	// right vector: cross product of the view direction and the up vector
	Vector rVecOriginal = wVecOriginal.cross(uVec);
	rVecOriginal.normalize();
	
	double stereoDisplacement = scene.camera.stereoDist / 2.0;
	int widthResolution = scene.resolution[0];
	if (scene.camera.stereoDist > 0.0) {		
		printf("Start left picture.\n");
		cameraPos = scene.camera.position + (rVecOriginal * stereoDisplacement);
		cameraPosR = scene.camera.position - (rVecOriginal * stereoDisplacement);
		
		widthResolution = floor(scene.resolution[0] / 2);
	} else if (scene.camera.stereoDist < 0.0) {		
		printf("Start left picture.\n");
		stereoDisplacement = - scene.camera.stereoDist / 2.0;
		cameraPos = scene.camera.position - (rVecOriginal * stereoDisplacement);
		cameraPosR = scene.camera.position + (rVecOriginal * stereoDisplacement);
		
		widthResolution = floor(scene.resolution[0] / 2);
	}

	
    Vector wVec = cameraCenter - cameraPos;
    wVec.normalize();
    Vector rVec = wVec.cross(uVec);
    rVec.normalize();
    
    // get top from tan(fovy)
    double tangent = tan(deg2rad(scene.camera.fovy/2));
    // get length of top from centre of image plane
    double top = scene.camera.zNear * tangent;
    double right = top * scene.camera.aspect;
	if (scene.camera.stereoDist != 0.0) {		
		right = right / 2;
	}
    double left = -right;
    double bottom = -top;
	
    // calculate vector from camera to left top of image plane
    Vector centerVec = cameraPos + (scene.camera.zNear * wVec);
    Vector oVec = centerVec + (left * rVec) + (bottom * uVec);
    double deltaU = (right - left) / scene.resolution[0];
	if (scene.camera.stereoDist != 0.0) {		
		deltaU = deltaU * 2;
	}
    double deltaV = (top - bottom) / scene.resolution[1];    
	    
    // Iterate over all the pixels in the image.
    for(int y = 0; y < scene.resolution[1]; y++) {
        for(int x = 0; x < widthResolution; x++) {

            // Generate the appropriate ray for this pixel
			Ray ray;
			if (scene.objects.empty())
			{
				//if there are no objects in the scene, render the default scene:
				//we assume the view plane is at z = 640 with width and height both 640
				ray = Ray(cameraPos, (Vector(-320, -320, 640) + Vector(x + 0.5, y + 0.5, 0) - cameraPos).normalized());
			}
			else
			{
				// set primary ray using the camera parameters
				//!!! USEFUL NOTES: all world coordinate rays need to have a normalized direction
				
				Vector changeU = (x + 0.5) * deltaU * rVec;
                Vector changeY = (y + 0.5) * deltaV * uVec;
                Vector pixelPos = oVec + changeU + changeY;
                
                Vector rayOfHope = pixelPos - cameraPos;
                rayOfHope.normalize();
                
                ray = Ray(cameraPos, rayOfHope);
                //!!! rays do not have w coordinate constructed properly.
			}

            // Initialize recursive ray depth.
            int rayDepth = 0;
           
            // Our recursive raytrace will compute the color and the z-depth
            Vector color;

            // This should be the maximum depth, corresponding to the far plane.
            // NOTE: This assumes the ray direction is unit-length and the
            // ray origin is at the camera position.
            double depth = scene.camera.zFar;

            // Calculate the pixel value by shooting the ray into the scene
            trace(ray, rayDepth, scene, color, depth);

            // Depth test
            if(depth >= scene.camera.zNear && depth <= scene.camera.zFar && 
                depth < zBuffer[x + y*scene.resolution[0]]) {
                zBuffer[x + y*scene.resolution[0]] = depth;

                // Set the image color (and depth)
                colorImage.setPixel(x, y, color);
                depthImage.setPixel(x, y, (depth-scene.camera.zNear) / 
                                        (scene.camera.zFar-scene.camera.zNear));
            }
        }

		//output step information
		if (y % 100 == 0)
		{
			printf("Row %d pixels finished.\n", y);
		}
    }
	
	if (scene.camera.stereoDist != 0.0) {		
		printf("Start right picture.\n");
		Vector wVecR = cameraCenterR - cameraPosR;
		wVecR.normalize();
		// right vector for the right eye: cross product of view direction and up
		Vector rVecR = wVecR.cross(uVec);
		rVecR.normalize();
		
		// calculate vector from camera to left top of image plane
		Vector centerVecR = cameraPosR + (scene.camera.zNear * wVecR);
		Vector oVecR = centerVecR + (left * rVecR) + (bottom * uVec);
		
		// Iterate over all the pixels in the image.
		for(int y = 0; y < scene.resolution[1]; y++) {
			for(int x = 0; x < (scene.resolution[0] / 2); x++) {

				// Generate the appropriate ray for this pixel
				Ray ray;
				if (scene.objects.empty())
				{
					//if there are no objects in the scene, render the default scene:
					//we assume the view plane is at z = 640 with width and height both 640
					ray = Ray(cameraPosR, (Vector(-320, -320, 640) + Vector(x + 0.5, y + 0.5, 0) - cameraPosR).normalized());
				}
				else
				{
					// set primary ray using the camera parameters
					//!!! USEFUL NOTES: all world coordinate rays need to have a normalized direction
					
					Vector changeU = (x + 0.5) * deltaU * rVecR;
					Vector changeY = (y + 0.5) * deltaV * uVec;
					Vector pixelPos = oVecR + changeU + changeY;
					
					Vector rayOfHope = pixelPos - cameraPosR;
					rayOfHope.normalize();
					ray = Ray(cameraPosR, rayOfHope);
					//!!! rays do not have w coordinate constructed properly.
				}

				// Initialize recursive ray depth.
				int rayDepth = 0;
			   
				// Our recursive raytrace will compute the color and the z-depth
				Vector color;

				// This should be the maximum depth, corresponding to the far plane.
				// NOTE: This assumes the ray direction is unit-length and the
				// ray origin is at the camera position.
				double depth = scene.camera.zFar;

				// Calculate the pixel value by shooting the ray into the scene
				trace(ray, rayDepth, scene, color, depth);

				// Depth test
				// zBuffer index into the right half of the side-by-side image
				int testDepth = x + floor(scene.resolution[0] / 2) + y*scene.resolution[0];
				if(depth >= scene.camera.zNear && depth <= scene.camera.zFar && 
					depth < zBuffer[testDepth]) {
					zBuffer[testDepth] = depth;

					// Set the image color (and depth)
					colorImage.setPixel(x+floor(scene.resolution[0] / 2), y, color);
					depthImage.setPixel(x+floor(scene.resolution[0] / 2), y, (depth-scene.camera.zNear) / 
											(scene.camera.zFar-scene.camera.zNear));
				}
			}

			//output step information
			if (y % 100 == 0)
			{
				printf("Row %d pixels finished.\n", y);
			}
		}
	}

	//save image
    colorImage.writeBMP(filename);
    depthImage.writeBMP(depth_filename);

	printf("Ray tracing finished with images saved.\n");

    delete[] zBuffer;
}
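
The stereo setup above displaces each eye by half of stereoDist along the camera's right vector, with the signs swapped when stereoDist is negative. A minimal sketch of the positive-stereoDist branch, using the Vector type from this example:

// Eye positions for side-by-side stereo rendering: +/- half the stereo
// distance along the right vector (mirrors the stereoDist > 0 branch above).
void stereoEyes(const Vector &position, const Vector &rightVec, double stereoDist,
                Vector &leftEye, Vector &rightEye)
{
	double half = stereoDist / 2.0;
	leftEye  = position + (rightVec * half);
	rightEye = position - (rightVec * half);
}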