Example #1
cv::Mat draw(int id, int cellSize, bool withMargin, cv::Scalar color) const {
    // Creating the image of the bit matrix
    static const int DATA_SIZE = 6;
    cv::Size dataDim(DATA_SIZE,DATA_SIZE);
    unsigned char dataMatrix[DATA_SIZE*DATA_SIZE];
    mDecode.getCodec().getTagEncodedId(id, dataMatrix);
    cv::Mat dataImage(dataDim, CV_8U, dataMatrix);

    // Adding the black border around the bit matrix
    cv::Size borderSize(2,2);
    cv::Mat tagImage(dataImage.size()+borderSize*2, CV_8U, cv::Scalar(0));
    dataImage.copyTo(tagImage(cv::Rect(borderSize, dataImage.size())));

    // Adding the optional white margin
    cv::Size marginSize(0,0);
    if (withMargin) marginSize += borderSize;
    cv::Mat outlinedImage(tagImage.size()+marginSize*2, CV_8U, cv::Scalar(1));
    tagImage.copyTo(outlinedImage(cv::Rect(marginSize, tagImage.size())));

    // Resizing to specified cellSize
    cv::Mat sizedImage(outlinedImage.size()*cellSize, CV_8U);
    cv::resize(outlinedImage, sizedImage, sizedImage.size(), 0, 0, cv::INTER_NEAREST);

    // Coloring
    cv::Mat   redImage = (1-sizedImage)*color[0]+sizedImage*255;
    cv::Mat greenImage = (1-sizedImage)*color[1]+sizedImage*255;
    cv::Mat  blueImage = (1-sizedImage)*color[2]+sizedImage*255;
    cv::Mat colorImage(sizedImage.size(), CV_8UC3);
    cv::merge(std::vector<cv::Mat>{blueImage, greenImage, redImage}, colorImage);

    return colorImage;
}
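// A minimal standalone sketch of the same border/margin/upscale pipeline,
// assuming OpenCV only; a hard-coded 6x6 bit pattern stands in for the
// mDecode.getCodec() payload used above.
#include <opencv2/opencv.hpp>

int main() {
    unsigned char bits[36];
    for (int i = 0; i < 36; ++i) bits[i] = (i % 3) ? 1 : 0; // hypothetical payload
    cv::Mat data(6, 6, CV_8U, bits);

    // 2-cell black border, then an optional 2-cell white margin.
    cv::Mat bordered, outlined;
    cv::copyMakeBorder(data, bordered, 2, 2, 2, 2, cv::BORDER_CONSTANT, cv::Scalar(0));
    cv::copyMakeBorder(bordered, outlined, 2, 2, 2, 2, cv::BORDER_CONSTANT, cv::Scalar(1));

    // Upscale with nearest-neighbour so cells stay crisp, then map {0,1} to {0,255}.
    cv::Mat sized;
    cv::resize(outlined, sized, outlined.size() * 16, 0, 0, cv::INTER_NEAREST);
    cv::imwrite("tag.png", sized * 255);
    return 0;
}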
Example #2
int main(int argc, char **argv)
{
	QApplication app(argc, argv);

	QKinectGrabberV1 k;
	k.start();

	QImageWidget colorWidget;
	//colorWidget.setMinimumSize(720, 480);
	colorWidget.setMinimumSize(640, 480);
	colorWidget.show();
	QApplication::connect(&k, SIGNAL(colorImage(QImage)), &colorWidget, SLOT(setImage(QImage)));

	QImageWidget depthWidget;
	//depthWidget.setMinimumSize(512, 424);
	depthWidget.setMinimumSize(640, 480);
	depthWidget.show();
	QApplication::connect(&k, SIGNAL(depthImage(QImage)), &depthWidget, SLOT(setImage(QImage)));

	//QImageWidget infraredWidget;
	//infraredWidget.setMinimumSize(512, 424);
	//infraredWidget.show();
	//QApplication::connect(&k, SIGNAL(infraredImage(QImage)), &infraredWidget, SLOT(setImage(QImage)));

	int app_exit = app.exec();
	k.stop();
	return app_exit;

}
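// The SIGNAL()/SLOT() macro connections above are only checked at run time.
// A self-contained sketch of Qt 5's compile-time-checked pointer-to-member
// syntax, with stock Qt widgets standing in for QKinectGrabberV1/QImageWidget:
#include <QApplication>
#include <QLabel>
#include <QLineEdit>

int main(int argc, char **argv)
{
	QApplication app(argc, argv);

	QLineEdit edit;
	QLabel label;
	// Signal and slot signatures are verified by the compiler here.
	QObject::connect(&edit, &QLineEdit::textChanged, &label, &QLabel::setText);

	edit.show();
	label.show();
	return app.exec();
}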
Example #3
PointBufferPtr KinectIO::getBuffer()
{
	// Get depth image from sensor
	std::vector<short> depthImage(480 * 640, 0);
	m_grabber->getDepthImage(depthImage);

	std::vector<uint8_t> colorImage(480 * 640 * 3, 0);
	m_grabber->getColorImage(colorImage);

	// Collect indices of invalid depth readings; the sensor reports these as
	// 0 (note isnan() would always be false for the integral short samples).
	std::set<int> nans;
	for(size_t i = 0; i < depthImage.size(); i++)
	{
		if(depthImage[i] == 0) nans.insert(i);
	}

	size_t numPoints = depthImage.size() - nans.size();

	// Return a null pointer if no valid depth data was grabbed
	if(numPoints == 0) return PointBufferPtr();

	// Convert depth image into point cloud
	PointBufferPtr buffer(new PointBuffer);
	floatArr points(new float[numPoints * 3]);
	ucharArr colors(new uchar[numPoints * 3]);

	int i,j;
	int index = 0;
	int c = 0;
	for (i = 0; i < 480; i++) {
		for (j = 0; j < 640; j++) {

			if(nans.find(c) == nans.end())
			{
				Eigen::Vector4f v;
				v << j, i, (float)(depthImage[i * 640 + j]), 1.0f;
				v = m_depthMatrix.transpose() * v;

				points[3 * index    ] = v(0) / v(3);
				points[3 * index + 1] = v(1) / v(3);
				points[3 * index + 2] = v(2) / v(3);

				colors[3 * index    ] = colorImage[3 * c    ];
				colors[3 * index + 1] = colorImage[3 * c + 1];
				colors[3 * index + 2] = colorImage[3 * c + 2];
				index++;
			}
			c++;
		}
	}

	buffer->setPointArray(points, numPoints);
	buffer->setPointColorArray(colors, numPoints);
	return buffer;
}
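// A hedged sketch of the homogeneous back-projection performed per pixel
// above: lift (column, row, depth) to 4D, multiply by a 4x4 matrix, then
// de-homogenize. The identity matrix is a stand-in for m_depthMatrix,
// whose actual calibration values are not shown in the example.
#include <Eigen/Dense>
#include <iostream>

int main()
{
	Eigen::Matrix4f M = Eigen::Matrix4f::Identity(); // stand-in calibration

	int j = 320, i = 240;   // pixel column / row
	float d = 1234.0f;      // raw depth sample
	Eigen::Vector4f v(j, i, d, 1.0f);
	v = M * v;

	// The perspective divide yields the 3D point, as in getBuffer().
	std::cout << v(0) / v(3) << " " << v(1) / v(3) << " " << v(2) / v(3) << "\n";
	return 0;
}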
Example #4
void CameraWorker::onTimeout()
{
    if(isCamera)
    {
        displayFrame = displayCamFrame;
        frame = cvQueryFrame(capture);
    }
    else
    {
        displayFrame = displayArenaFrame;
        frame = arenaFrame;
    }
    if(!frame)
        return;
    frame->roi = roi;
    cvResize(frame, calibFrame, CV_INTER_NN);
    cvCopy(calibFrame, displayFrame);
    if(isThreshold)
        colorImage(calibFrame, displayFrame);    
    if(isBlob)
    {
        makeBlobImage(frame, blobImage);
        b->detectBlobs(blobImage, a.getZoneImage());
        blobDataArr = b->getBlobDataArr();
        drawBlobs(displayFrame, blobDataArr);
        myMutex->lock();
        bs->populateFromBlobData(blobDataArr);
        bs->bombDepositPoint = a.getBombDrop();
        bs->resourceDepositPoint = a.getMineDrop();
        bs->startCorner = a.getStartCorner();
        myMutex->unlock();
        emit beliefStateReady(bs);
    }
    if(isArenaCalib)
    {
        a.drawArenaDisplay(displayFrame);
    }
    cvCvtColor(displayFrame, displayFrame,CV_BGR2RGB);
    QImage qimg((uchar*)displayFrame->imageData, displayFrame->width, displayFrame->height, displayFrame->widthStep, QImage::Format_RGB888);
    myMutex->lock();
    if(myPixmap)
        delete myPixmap;
    myPixmap = new QPixmap(QPixmap::fromImage(qimg));
    myMutex->unlock();
    emit imageReady(myPixmap);
    timer->setSingleShot(true);
    timer->start(10);
}
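// A standalone sketch of the self-re-arming single-shot timer pattern that
// ends onTimeout() above: every tick schedules the next one, so the loop
// rate adapts to how long the frame handler takes.
#include <QCoreApplication>
#include <QTimer>
#include <cstdio>

int main(int argc, char **argv)
{
    QCoreApplication app(argc, argv);

    QTimer timer;
    timer.setSingleShot(true);
    int ticks = 0;
    QObject::connect(&timer, &QTimer::timeout, [&]() {
        std::printf("tick %d\n", ++ticks); // frame processing would go here
        if (ticks < 5) timer.start(10);    // re-arm for the next iteration
        else app.quit();
    });

    timer.start(10);
    return app.exec();
}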
Example #5
void PCSDKImage::updateDepthImage()
{
    auto depthImage = pcsdk_->getDepthImage();

    cv::Mat colorImage(PCSDK::DEPTH_HEIGHT, PCSDK::DEPTH_WIDTH, CV_8UC3);

    for (int j = 0; j < PCSDK::DEPTH_HEIGHT; ++j) {
        for (int i = 0; i < PCSDK::DEPTH_WIDTH; ++i) {
            // Row-major index of pixel (row j, column i); assumes the PCSDK
            // depth buffer uses the same layout as cv::Mat.
            const int index = PCSDK::DEPTH_WIDTH * j + i;
            /*
            const double maxLength = 600; // mm
            const double data = depthImage[index];
            const double base = maxLength / 3;

            double b = 0, g = 0, r = 0;
            if (data < base) {
                 b = data / base * 255;
            } else if (data < 2 * base) {
                g = (data - base) / base * 255;
                b = 255 - g;
            } else if (data < 3 * base) {
                r = (data - 2 * base) / base * 255;
                g = 255 - r;
            } else {
                r = 255;
            }
            colorImage[3*(240*i + j) + 0] = r;
            colorImage[3*(240*i + j) + 1] = g;
            colorImage[3*(240*i + j) + 2] = b;
            */
            const double data = depthImage[index];
            const double depthColor = (data > minDistance_ && data < maxDistance_) ?
                        (data - minDistance_) / (maxDistance_ - minDistance_) * 255 : 0;
            colorImage.data[3*index] = colorImage.data[3*index + 1] = colorImage.data[3*index + 2] = depthColor;
        }
    }

    image_ = colorImage;
    emit imageChanged();
}
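// A minimal sketch of the grey-scale mapping above: depths inside
// (minDist, maxDist) map linearly to 0..255 and everything else to 0.
// The two bounds stand in for the minDistance_/maxDistance_ members.
#include <cstdint>

uint8_t depthToGrey(double d, double minDist, double maxDist)
{
    if (d <= minDist || d >= maxDist) return 0;
    return static_cast<uint8_t>((d - minDist) / (maxDist - minDist) * 255.0);
}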
Example #6
    // Update the color frame
    void updateColorFrame()
    {
        if ( colorFrameReader == nullptr ){
            return;
        }

        // Acquire the latest frame
        ComPtr<IColorFrame> colorFrame;
        auto ret = colorFrameReader->AcquireLatestFrame( &colorFrame );
        if ( ret == S_OK ){
            // Copy the frame data in BGRA format
            ERROR_CHECK( colorFrame->CopyConvertedFrameDataToArray(
                colorBuffer.size(), &colorBuffer[0], ColorImageFormat::ColorImageFormat_Bgra ) );

            // Display the color data
            cv::Mat colorImage( colorHeight, colorWidth, CV_8UC4, &colorBuffer[0] );
            cv::imshow( "Color Image", colorImage );

            // Without a smart pointer, the frame would have to be released manually
            // colorFrame->Release();
        }
    }
Example #7
void MKinect::getColorData(IMultiSourceFrame* frame, QImage& dest) {
	// Acquire the color frame via its frame reference; colorframe must be
	// initialized so the failure check below is well-defined.
	IColorFrame* colorframe = NULL;
	IColorFrameReference* frameref = NULL;
	frame->get_ColorFrameReference(&frameref);
	if (frameref) frameref->AcquireFrame(&colorframe);
	if (frameref) frameref->Release();
	if (!colorframe) return;

	// Process color frame data...
	colorframe->CopyConvertedFrameDataToArray(KinectColorWidth*KinectColorHeight * 4, data, ColorImageFormat_Bgra);
	QImage colorImage(data, KinectColorWidth, KinectColorHeight, QImage::Format_RGB32);
	//QImage depthImage(depthData.planes[0], width2, height2, QImage::Format_RGB32);
	dest = colorImage;
	//QDir dir("../tests/k2/last_test");
	//if (!dir.exists()) {
	//	dir.mkpath(".");
	//	colorImage.save("../tests/k2/last_test/image_" + QString::number(_actual_frame) + ".png", 0);
	//}
	//else {
	//	colorImage.save("../tests/k2/last_test/image_" + QString::number(_actual_frame) + ".png", 0);
	//}
	if (colorframe) colorframe->Release();
}
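// Note that QImage's uchar* constructor wraps the caller's buffer without
// copying it, so `dest` above keeps aliasing `data` after the assignment.
// A hedged sketch of detaching into owned memory via copy():
#include <QImage>

QImage deepCopy(const uchar *buffer, int width, int height)
{
	return QImage(buffer, width, height, QImage::Format_RGB32).copy();
}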
Example #8
void FramebufferGLTest::read() {
    #ifndef MAGNUM_TARGET_GLES
    if(!Context::current()->isExtensionSupported<Extensions::GL::ARB::framebuffer_object>())
        CORRADE_SKIP(Extensions::GL::ARB::framebuffer_object::string() + std::string(" is not available."));
    #endif

    Renderbuffer color;
    #ifndef MAGNUM_TARGET_GLES2
    color.setStorage(RenderbufferFormat::RGBA8, Vector2i(128));
    #else
    color.setStorage(RenderbufferFormat::RGBA4, Vector2i(128));
    #endif

    /* Separate depth and stencil renderbuffers are not supported (at least
       on my NVidia), thus we need to do this juggling with one renderbuffer */
    Renderbuffer depthStencil;
    #ifdef MAGNUM_TARGET_GLES2
    if(Context::current()->isExtensionSupported<Extensions::GL::OES::packed_depth_stencil>())
    #endif
    {
        #ifdef MAGNUM_TARGET_GLES2
        Debug() << "Using" << Extensions::GL::OES::packed_depth_stencil::string();
        #endif
        depthStencil.setStorage(RenderbufferFormat::Depth24Stencil8, Vector2i(128));
    }
    #ifdef MAGNUM_TARGET_GLES2
    else depthStencil.setStorage(RenderbufferFormat::DepthComponent16, Vector2i(128));
    #endif

    Framebuffer framebuffer({{}, Vector2i(128)});
    framebuffer.attachRenderbuffer(Framebuffer::ColorAttachment(0), color)
               .attachRenderbuffer(Framebuffer::BufferAttachment::Depth, depthStencil);

    #ifdef MAGNUM_TARGET_GLES2
    if(Context::current()->isExtensionSupported<Extensions::GL::OES::packed_depth_stencil>())
    #endif
    {
        framebuffer.attachRenderbuffer(Framebuffer::BufferAttachment::Stencil, depthStencil);
    }

    MAGNUM_VERIFY_NO_ERROR();
    CORRADE_COMPARE(framebuffer.checkStatus(FramebufferTarget::ReadDraw), Framebuffer::Status::Complete);

    Renderer::setClearColor(Math::normalize<Color4>(Color4ub(128, 64, 32, 17)));
    Renderer::setClearDepth(Math::normalize<Float, UnsignedShort>(48352));
    Renderer::setClearStencil(67);
    framebuffer.clear(FramebufferClear::Color|FramebufferClear::Depth|FramebufferClear::Stencil);

    Image2D colorImage(ColorFormat::RGBA, ColorType::UnsignedByte);
    framebuffer.read({16, 8}, {8, 16}, colorImage);
    CORRADE_COMPARE(colorImage.size(), Vector2i(8, 16));

    MAGNUM_VERIFY_NO_ERROR();
    CORRADE_COMPARE(colorImage.data<Color4ub>()[0], Color4ub(128, 64, 32, 17));

    #ifdef MAGNUM_TARGET_GLES
    if(Context::current()->isExtensionSupported<Extensions::GL::NV::read_depth>())
    #endif
    {
        #ifdef MAGNUM_TARGET_GLES
        Debug() << "Using" << Extensions::GL::NV::read_depth::string();
        #endif

        Image2D depthImage(ColorFormat::DepthComponent, ColorType::UnsignedShort);
        framebuffer.read({}, Vector2i(1), depthImage);

        MAGNUM_VERIFY_NO_ERROR();
        CORRADE_COMPARE(depthImage.data<UnsignedShort>()[0], 48352);
    }

    #ifdef MAGNUM_TARGET_GLES
    if(Context::current()->isExtensionSupported<Extensions::GL::NV::read_stencil>())
    #endif
    {
        #ifdef MAGNUM_TARGET_GLES
        Debug() << "Using" << Extensions::GL::NV::read_stencil::string();
        #endif

        Image2D stencilImage(ColorFormat::StencilIndex, ColorType::UnsignedByte);
        framebuffer.read({}, Vector2i(1), stencilImage);

        MAGNUM_VERIFY_NO_ERROR();
        CORRADE_COMPARE(stencilImage.data<UnsignedByte>()[0], 67);
    }

    #ifdef MAGNUM_TARGET_GLES
    if(Context::current()->isExtensionSupported<Extensions::GL::NV::read_depth_stencil>())
    #endif
    {
        #ifdef MAGNUM_TARGET_GLES
        Debug() << "Using" << Extensions::GL::NV::read_depth_stencil::string();
        #endif

        Image2D depthStencilImage(ColorFormat::DepthStencil, ColorType::UnsignedInt248);
        framebuffer.read({}, Vector2i(1), depthStencilImage);

        MAGNUM_VERIFY_NO_ERROR();
        /** @todo This will probably fail on different systems */
        CORRADE_COMPARE(depthStencilImage.data<UnsignedInt>()[0] >> 8, 12378300);
        CORRADE_COMPARE(depthStencilImage.data<UnsignedByte>()[0], 67);
    }
}
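// A worked check of the normalize round trip the color comparison above
// relies on: an 8-bit channel value of 128 normalizes to 128/255 and a
// read-back as UnsignedByte recovers 128 exactly.
#include <cstdio>

int main()
{
    unsigned char c = 128;
    float f = c / 255.0f;                                      // normalize
    unsigned char back = (unsigned char)(f * 255.0f + 0.5f);   // denormalize
    std::printf("%f -> %u\n", f, back);                        // 0.501961 -> 128
    return 0;
}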
Example #9
void Raytracer::render(const char *filename, const char *depth_filename,
                       Scene const &scene)
{
    // Allocate the two images that will ultimately be saved.
    Image colorImage(scene.resolution[0], scene.resolution[1]);
    Image depthImage(scene.resolution[0], scene.resolution[1]);
    
    // Create the zBuffer.
    double *zBuffer = new double[scene.resolution[0] * scene.resolution[1]];
    for(int i = 0; i < scene.resolution[0] * scene.resolution[1]; i++) {
        zBuffer[i] = DBL_MAX;
    }
    
    // @@@@@@ YOUR CODE HERE
    // calculate camera parameters for rays, refer to the slides for details
    //!!! USEFUL NOTES: tan() takes rad rather than degree, use deg2rad() to transform
    //!!! USEFUL NOTES: view plane can be anywhere, but it will be implemented differently,
    //you can find references from the course slides 22_GlobalIllum.pdf
    
    // Place the view plane at the near plane distance.
    double distance = scene.camera.zNear;
    Vector eyePoint = scene.camera.position;
    Vector lookatPoint = scene.camera.center;
    // view direction
    Vector w = (lookatPoint - eyePoint).normalized();
    Vector up = scene.camera.up;
    Vector u = w.cross(up).normalized();
    Vector v = u.cross(w).normalized();
    
    
    double rad = deg2rad(scene.camera.fovy);
    double viewPlaneHalfHeight=tan(rad/2)*distance;
    
    double viewPlaneHalfWidth = scene.camera.aspect*viewPlaneHalfHeight;
    
    Vector viewPlaneBottomLeftPoint = eyePoint+ w*distance- v*viewPlaneHalfHeight - u*viewPlaneHalfWidth;
    
    
    
    // Iterate over all the pixels in the image.
    for(int y = 0; y < scene.resolution[1]; y++) {
        for(int x = 0; x < scene.resolution[0]; x++) {
            
            // Generate the appropriate ray for this pixel
            Ray ray;
            if (scene.objects.empty())
            {
                //no objects in the scene, then we render the default scene:
                //in the default scene, we assume the view plane is at z = 640 with width and height both 640
                ray = Ray(scene.camera.position, (Vector(-320, -320, 640) + Vector(x + 0.5, y + 0.5, 0) - scene.camera.position).normalized());
            }
            else
            {
                // @@@@@@ YOUR CODE HERE
                // set primary ray using the camera parameters
                //!!! USEFUL NOTES: all world coordinate rays need to have a normalized direction
                
                Vector xIncVector = (u*2*viewPlaneHalfWidth)/scene.resolution[0];
                Vector yIncVector = (v*2*viewPlaneHalfHeight)/scene.resolution[1];
                Vector viewPlanePoint = viewPlaneBottomLeftPoint + x*xIncVector + y*yIncVector;
                Vector rayDirection = viewPlanePoint - eyePoint;
                ray = Ray(eyePoint, rayDirection.normalized());
            }
            
            // Initialize recursive ray depth.
            int rayDepth = 0;
            
            // Our recursive raytrace will compute the color and the z-depth
            Vector color;
            
            // This should be the maximum depth, corresponding to the far plane.
            // NOTE: This assumes the ray direction is unit-length and the
            // ray origin is at the camera position.
            double depth = scene.camera.zFar;
            
            // Calculate the pixel value by shooting the ray into the scene
            trace(ray, rayDepth, scene, color, depth);
            
            // Depth test
            if(depth >= scene.camera.zNear && depth <= scene.camera.zFar &&
               depth < zBuffer[x + y*scene.resolution[0]]) {
                zBuffer[x + y*scene.resolution[0]] = depth;
                
                // Set the image color (and depth)
                colorImage.setPixel(x, y, color);
                depthImage.setPixel(x, y, (depth-scene.camera.zNear) /
                                    (scene.camera.zFar-scene.camera.zNear));
            }
        }
        
        //output step information
        if (y % 100 == 0)
        {
            printf("Row %d pixels finished.\n", y);
        }
    }
    
    //save image
    colorImage.writeBMP(filename);
    depthImage.writeBMP(depth_filename);
    
    printf("Ray tracing finished with images saved.\n");
    
    delete[] zBuffer;
}
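// A worked check of the view-plane extents computed above: for a
// hypothetical fovy of 60 degrees, aspect 4:3 and the plane at distance 1,
// the half-height is tan(30 deg) and the half-width is 4/3 of it.
#include <cmath>
#include <cstdio>

static double deg2rad(double deg) { return deg * M_PI / 180.0; } // local stand-in

int main()
{
    double fovy = 60.0, aspect = 4.0 / 3.0, distance = 1.0;
    double halfHeight = tan(deg2rad(fovy) / 2.0) * distance;
    double halfWidth = aspect * halfHeight;
    printf("halfHeight = %f, halfWidth = %f\n", halfHeight, halfWidth); // 0.577350 0.769800
    return 0;
}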
Example #10
 // Draw the color data
 void drawColorFrame()
 {
     // Display the color data
     cv::Mat colorImage( colorHeight, colorWidth, CV_8UC4, &colorBuffer[0] );
     cv::imshow( "Color Image", colorImage );
 }
Example #11
void Raytracer::render(const char *filename, const char *depth_filename,
                       Scene const &scene)
{
    // Allocate the two images that will ultimately be saved.
    Image colorImage(scene.resolution[0], scene.resolution[1]);
    Image depthImage(scene.resolution[0], scene.resolution[1]);
    
    // Create the zBuffer.
    double *zBuffer = new double[scene.resolution[0] * scene.resolution[1]];
    for(int i = 0; i < scene.resolution[0] * scene.resolution[1]; i++) {
        zBuffer[i] = DBL_MAX;
    }

	// @@@@@@ YOUR CODE HERE
	// calculate camera parameters for rays, refer to the slides for details
	//!!! USEFUL NOTES: tan() takes rad rather than degree, use deg2rad() to transform
	//!!! USEFUL NOTES: view plane can be anywhere, but it will be implemented differently,
	//you can find references from the course slides 22_GlobalIllum.pdf
    
    
	Vector cameraPos = scene.camera.position;
	Vector cameraCenter = scene.camera.center;
	
	Vector cameraPosR = scene.camera.position;
	Vector cameraCenterR = scene.camera.center;
	
	// viewing direction vector, obtained by subtracting the camera position from the center
	Vector wVecOriginal = scene.camera.center - cameraPos;
    wVecOriginal.normalize();
	// up vector is defined (u)
    Vector uVec = scene.camera.up;
    uVec.normalize();
	// right vector is obtained by taking the cross product of w and the up vector
	Vector rVecOriginal = wVecOriginal.cross(uVec);
	rVecOriginal.normalize();
	
	double stereoDisplacement = scene.camera.stereoDist / 2.0;
	int widthResolution = scene.resolution[0];
	if (scene.camera.stereoDist > 0.0) {		
		printf("Start left picture.\n");
		cameraPos = scene.camera.position + (rVecOriginal * stereoDisplacement);
		cameraPosR = scene.camera.position - (rVecOriginal * stereoDisplacement);
		
		widthResolution = floor(scene.resolution[0] / 2);
	} else if (scene.camera.stereoDist < 0.0) {		
		printf("Start left picture.\n");
		stereoDisplacement = - scene.camera.stereoDist / 2.0;
		cameraPos = scene.camera.position - (rVecOriginal * stereoDisplacement);
		cameraPosR = scene.camera.position + (rVecOriginal * stereoDisplacement);
		
		widthResolution = floor(scene.resolution[0] / 2);
	}

	
    Vector wVec = cameraCenter - cameraPos;
    wVec.normalize();
    Vector rVec = wVec.cross(uVec);
    rVec.normalize();
    
    // get top from tan(fovy)
    double tangent = tan(deg2rad(scene.camera.fovy/2));
	//double atangent = atan(deg2rad(scene.camera.fovy)/2);
    // get length of top from centre of image plane
    double top = scene.camera.zNear * tangent;
    double right = top * scene.camera.aspect;
	if (scene.camera.stereoDist != 0.0) {		
		right = right / 2;
	}
    double left = -right;
    double bottom = -top;
	
    // calculate the bottom-left corner of the image plane
    Vector centerVec = cameraPos + (scene.camera.zNear * wVec);
    Vector oVec = centerVec + (left * rVec) + (bottom * uVec);
    double deltaU = (right - left) / scene.resolution[0];
	if (scene.camera.stereoDist != 0.0) {		
		deltaU = deltaU * 2;
	}
    double deltaV = (top - bottom) / scene.resolution[1];    
	    
    // Iterate over all the pixels in the image.
    for(int y = 0; y < scene.resolution[1]; y++) {
        for(int x = 0; x < widthResolution; x++) {

            // Generate the appropriate ray for this pixel
			Ray ray;
			if (scene.objects.empty())
			{
				//no objects in the scene, then we render the default scene:
				//in the default scene, we assume the view plane is at z = 640 with width and height both 640
				ray = Ray(cameraPos, (Vector(-320, -320, 640) + Vector(x + 0.5, y + 0.5, 0) - cameraPos).normalized());
			}
			else
			{
				// set primary ray using the camera parameters
				//!!! USEFUL NOTES: all world coordinate rays need to have a normalized direction
				
				Vector changeU = (x + 0.5) * deltaU * rVec;
                Vector changeY = (y + 0.5) * deltaV * uVec;
                Vector pixelPos = oVec + changeU + changeY;
                
                Vector rayOfHope = pixelPos - cameraPos;
                rayOfHope.normalize();
                
                ray = Ray(cameraPos, rayOfHope);
                //!!! rays do not have w coordinate constructed properly.
			}

            // Initialize recursive ray depth.
            int rayDepth = 0;
           
            // Our recursive raytrace will compute the color and the z-depth
            Vector color;

            // This should be the maximum depth, corresponding to the far plane.
            // NOTE: This assumes the ray direction is unit-length and the
            // ray origin is at the camera position.
            double depth = scene.camera.zFar;

            // Calculate the pixel value by shooting the ray into the scene
            trace(ray, rayDepth, scene, color, depth);

            // Depth test
            if(depth >= scene.camera.zNear && depth <= scene.camera.zFar && 
                depth < zBuffer[x + y*scene.resolution[0]]) {
                zBuffer[x + y*scene.resolution[0]] = depth;

                // Set the image color (and depth)
                colorImage.setPixel(x, y, color);
                depthImage.setPixel(x, y, (depth-scene.camera.zNear) / 
                                        (scene.camera.zFar-scene.camera.zNear));
            }
        }

		//output step information
		if (y % 100 == 0)
		{
			printf("Row %d pixels finished.\n", y);
		}
    }
	
	if (scene.camera.stereoDist != 0.0) {		
		printf("Start right picture.\n");
		Vector wVecR = cameraCenterR - cameraPosR;
		wVecR.normalize();
		// up vector is defined (u)
		// right vector is gotten by taking the cross product of w and v
		Vector rVecR = wVecR.cross(uVec);
		rVecR.normalize();
		
		// calculate the bottom-left corner of the image plane
		Vector centerVecR = cameraPosR + (scene.camera.zNear * wVecR);
		Vector oVecR = centerVecR + (left * rVecR) + (bottom * uVec);
		
		// Iterate over all the pixels in the image.
		for(int y = 0; y < scene.resolution[1]; y++) {
			for(int x = 0; x < (scene.resolution[0] / 2); x++) {

				// Generate the appropriate ray for this pixel
				Ray ray;
				if (scene.objects.empty())
				{
					//no objects in the scene, then we render the default scene:
					//in the default scene, we assume the view plane is at z = 640 with width and height both 640
					ray = Ray(cameraPosR, (Vector(-320, -320, 640) + Vector(x + 0.5, y + 0.5, 0) - cameraPosR).normalized());
				}
				else
				{
					// set primary ray using the camera parameters
					//!!! USEFUL NOTES: all world coordinate rays need to have a normalized direction
					
					Vector changeU = (x + 0.5) * deltaU * rVecR;
					Vector changeY = (y + 0.5) * deltaV * uVec;
					Vector pixelPos = oVecR + changeU + changeY;
					
					Vector rayOfHope = pixelPos - cameraPosR;
					rayOfHope.normalize();
					ray = Ray(cameraPosR, rayOfHope);
					//!!! rays do not have w coordinate constructed properly.
				}

				// Initialize recursive ray depth.
				int rayDepth = 0;
			   
				// Our recursive raytrace will compute the color and the z-depth
				Vector color;

				// This should be the maximum depth, corresponding to the far plane.
				// NOTE: This assumes the ray direction is unit-length and the
				// ray origin is at the camera position.
				double depth = scene.camera.zFar;

				// Calculate the pixel value by shooting the ray into the scene
				trace(ray, rayDepth, scene, color, depth);

				// Depth test
				int testDepth = x + floor(scene.resolution[0] / 2) + y*scene.resolution[0];
				if(depth >= scene.camera.zNear && depth <= scene.camera.zFar && 
					depth < zBuffer[testDepth]) {
					zBuffer[testDepth] = depth;

					// Set the image color (and depth)
					colorImage.setPixel(x+floor(scene.resolution[0] / 2), y, color);
					depthImage.setPixel(x+floor(scene.resolution[0] / 2), y, (depth-scene.camera.zNear) / 
											(scene.camera.zFar-scene.camera.zNear));
				}
			}

			//output step information
			if (y % 100 == 0)
			{
				printf("Row %d pixels finished.\n", y);
			}
		}
	}

	//save image
    colorImage.writeBMP(filename);
    depthImage.writeBMP(depth_filename);

	printf("Ray tracing finished with images saved.\n");

    delete[] zBuffer;
}
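// A small sketch of the stereo eye placement used above: the two eyes sit
// stereoDist/2 on either side of the mono camera position along the right
// vector, and each renders one half of the image width. Vec3 is a minimal
// stand-in for the renderer's Vector type.
#include <cstdio>

struct Vec3 { double x, y, z; };

int main()
{
	Vec3 pos{0.0, 0.0, 0.0};
	Vec3 right{1.0, 0.0, 0.0};   // normalized r = w x u
	double stereoDist = 0.4;
	double s = stereoDist / 2.0;

	Vec3 leftEye { pos.x + right.x * s, pos.y + right.y * s, pos.z + right.z * s };
	Vec3 rightEye{ pos.x - right.x * s, pos.y - right.y * s, pos.z - right.z * s };
	printf("left eye x = %.2f, right eye x = %.2f\n", leftEye.x, rightEye.x);
	return 0;
}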