Example #1
void RayTracer::CalculatePixels(int ymin, int ymax)
{
	for (int r = ymin; r < ymax; ++r)
	{
		for (int c = 0; c < static_cast<int>(currentResolution.x); ++c)
		{
			imageWriter.SetPixelColor(currentSampler->ComputeSamplesAndColor(maxSamplesPerPixel, 2, [&](glm::vec3 inputSample) {
				const glm::vec3 minRange(-0.5f, -0.5f, 0.f);
				const glm::vec3 maxRange(0.5f, 0.5f, 0.f);
				const glm::vec3 sampleOffset = (maxSamplesPerPixel == 1) ? glm::vec3(0.f, 0.f, 0.f) : minRange + (maxRange - minRange) * inputSample;

				glm::vec2 normalizedCoordinates(static_cast<float>(c) + sampleOffset.x, static_cast<float>(r) + sampleOffset.y);
				normalizedCoordinates /= currentResolution;

				// Construct ray, send it out into the scene and see what we hit.
				std::shared_ptr<Ray> cameraRay = currentCamera->GenerateRayForNormalizedCoordinates(normalizedCoordinates);
				assert(cameraRay);

				IntersectionState rayIntersection(storedApplication->GetMaxReflectionBounces(), storedApplication->GetMaxRefractionBounces());
				bool didHitScene = currentScene->Trace(cameraRay.get(), &rayIntersection);

				// Use the intersection data to compute the BRDF response.
				glm::vec3 sampleColor(0.f);  // Default to black so rays that miss the scene return a defined color.
				if (didHitScene)
				{
					sampleColor = currentRenderer->ComputeSampleColor(rayIntersection, *cameraRay);
				}

				return sampleColor;
			}), c, r);
		}
	}
}
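The ymin/ymax band parameters suggest CalculatePixels is meant to run as a per-thread worker over a range of rows. Below is a minimal sketch of a possible dispatcher; CalculatePixelsThreaded is a hypothetical member (not from the source), and it assumes imageWriter.SetPixelColor tolerates concurrent writes to distinct pixels.

#include <algorithm>
#include <thread>
#include <vector>

void RayTracer::CalculatePixelsThreaded(int numThreads)
{
	const int height = static_cast<int>(currentResolution.y);
	const int bandSize = (height + numThreads - 1) / numThreads;  // ceil(height / numThreads)

	std::vector<std::thread> workers;
	workers.reserve(numThreads);
	for (int t = 0; t < numThreads; ++t)
	{
		const int ymin = t * bandSize;
		const int ymax = std::min(height, ymin + bandSize);
		if (ymin >= ymax)
			break;
		// Each worker renders a disjoint horizontal band of rows.
		workers.emplace_back(&RayTracer::CalculatePixels, this, ymin, ymax);
	}
	for (std::thread& w : workers)
		w.join();
}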
Example #2
bool PickingContainer::performMousePick(MouseEvent* e) {
    if (!pickingEnabled() || e->button() == MouseEvent::MOUSE_BUTTON_NONE)
        return false;

    if (touchPickingOn_)
        return true;

    if (e->state() == MouseEvent::MOUSE_STATE_RELEASE){
        mouseIsDown_ = false;
        mousePickingOngoing_ = false;
        return false;
    }
    else if (!mouseIsDown_ || e->state() == MouseEvent::MOUSE_STATE_PRESS){
        mouseIsDown_ = true;

        uvec2 coord = mousePosToPixelCoordinates(e->pos(), e->canvasSize());
        prevMouseCoord_ = coord;

        mousePickObj_ = findPickingObject(coord);

        if (mousePickObj_) {
            mousePickingOngoing_ = true;
            mousePickObj_->setPickingPosition(normalizedCoordinates(coord));
            mousePickObj_->setPickingDepth(e->depth());
            mousePickObj_->setPickingMouseEvent(*e);

            mousePickObj_->setPickingMove(vec2(0.f, 0.f));
            mousePickObj_->picked();
            return true;
        }
        else{
            mousePickingOngoing_ = false;
            return false;
        }
    }
    else if (e->state() == MouseEvent::MOUSE_STATE_MOVE){
        if (mousePickingOngoing_){
            uvec2 coord = mousePosToPixelCoordinates(e->pos(), e->canvasSize());
            mousePickObj_->setPickingMove(pixelMoveVector(prevMouseCoord_, coord));
            mousePickObj_->setPickingMouseEvent(*e);
            prevMouseCoord_ = coord;
            mousePickObj_->picked();
            return true;
        }
        else
            return false;
    }

    return false;
}
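A hypothetical call site (not from the source) showing how the boolean return is typically used: the canvas gives the picking container first refusal on the event, and only unconsumed events reach other handlers. Canvas, pickingContainer_, and forwardToInteractionHandlers are placeholder names.

void Canvas::handleMouseEvent(MouseEvent* e)
{
    // Picking gets first refusal; a true return means a picking object
    // consumed the event and it should not propagate further.
    if (pickingContainer_.performMousePick(e))
        return;

    forwardToInteractionHandlers(e);  // placeholder fallback path
}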
Example #3
void RayTracer::Run2()
{
// #pragma omp parallel for num_threads(8)

    for (int r = 0; r < static_cast<int>(currentResolution.y); ++r)
    {
        for (int c = 0; c < static_cast<int>(currentResolution.x); ++c)
        {
            imageWriter.SetPixelColor(currentSampler->ComputeSamplesAndColor(maxSamplesPerPixel, 2, [&](glm::vec3 inputSample) {
                const glm::vec3 minRange(-0.5f, -0.5f, 0.f);
                const glm::vec3 maxRange(0.5f, 0.5f, 0.f);
                const glm::vec3 sampleOffset = (maxSamplesPerPixel == 1) ? glm::vec3(0.f, 0.f, 0.f) : minRange + (maxRange - minRange) * inputSample;

                glm::vec2 normalizedCoordinates(static_cast<float>(c) + sampleOffset.x, static_cast<float>(r) + sampleOffset.y);
                normalizedCoordinates /= currentResolution;

                // Construct ray, send it out into the scene and see what we hit.
                std::shared_ptr<Ray> cameraRay = currentCamera->GenerateRayForNormalizedCoordinates(normalizedCoordinates);
                assert(cameraRay);

                IntersectionState rayIntersection(storedApplication->GetMaxReflectionBounces(), storedApplication->GetMaxRefractionBounces());
                bool didHitScene = currentScene->Trace(cameraRay.get(), &rayIntersection);

                // Use the intersection data to compute the BRDF response.
                glm::vec3 sampleColor(0.f);  // Default to black so rays that miss the scene return a defined color.
                if (didHitScene)
                {
                    sampleColor = currentRenderer->ComputeSampleColor(rayIntersection, *cameraRay);
                }
                return sampleColor;
            }), c, r);
        }
    }
        }
    }

    // Apply post-processing steps (e.g. tone mapping).
    storedApplication->PerformImagePostprocessing(imageWriter);

    // Copy the HDR data into the bitmap that we will save; everything gets clamped to [0.0, 1.0].
    imageWriter.CopyHDRToBitmap();

    // Save image.
    imageWriter.SaveImage();
}
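Run2 keeps an OpenMP pragma commented out. Here is a minimal self-contained sketch of what that pragma does when enabled, independent of the ray tracer: the outer row loop is split across threads, which is only safe when every iteration writes to its own pixel. Whether SetPixelColor and the shared sampler/renderer objects are thread-safe is not confirmed by the source. Compile with -fopenmp (gcc/clang) or /openmp (MSVC).

#include <cstddef>
#include <vector>

int main()
{
    const int width = 640;
    const int height = 480;
    std::vector<float> image(static_cast<std::size_t>(width) * height);

    // Rows are distributed across 8 threads; each (r, c) pair touches a
    // distinct element of `image`, so no synchronization is needed.
    #pragma omp parallel for num_threads(8)
    for (int r = 0; r < height; ++r)
    {
        for (int c = 0; c < width; ++c)
        {
            image[static_cast<std::size_t>(r) * width + c] = static_cast<float>(r + c);
        }
    }
    return 0;
}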
Example #4
bool PickingContainer::performTouchPick(TouchEvent* e) {
    if (!pickingEnabled())
        return false;

    std::vector<TouchPoint>& touchPoints = e->getTouchPoints();

    // Clear the picked touch point map
    pickedTouchPoints_.clear();

    // Guard against an empty touch-point list before indexing touchPoints[0].
    if (touchPoints.empty())
        return false;

    if (touchPoints.size() > 1 || touchPoints[0].state() != TouchPoint::TOUCH_STATE_ENDED)
        touchPickingOn_ = true;
    else
        touchPickingOn_ = false;

    std::unordered_map<int, PickingObject*>::iterator touchPickObjs_it;
    std::unordered_map<PickingObject*, std::vector<TouchPoint>>::iterator pickedTouchPoints_it;

    auto touchPoint = touchPoints.begin();
    while (touchPoint != touchPoints.end()) {
        bool isAssociated = false;
        if (touchPoint->state() == TouchPoint::TOUCH_STATE_STARTED) {
            // Find out if new touch point is touching inside a picking object
            uvec2 coord = mousePosToPixelCoordinates(touchPoint->getPos(), e->canvasSize());
            PickingObject* pickObj = findPickingObject(coord);

            // If it is, put it in the TouchIDPickingMap
            if (pickObj) {
                touchPickObjs_.emplace(touchPoint->getId(), pickObj);

                // Associate touch point with picking object
                // which can already have other associated touch points.
                pickedTouchPoints_it = pickedTouchPoints_.find(pickObj);
                if (pickedTouchPoints_it != pickedTouchPoints_.end()){
                    pickedTouchPoints_it->second.push_back(*touchPoint);
                }
                else{
                    pickedTouchPoints_.emplace(pickObj, std::vector<TouchPoint>{*touchPoint});
                }
                isAssociated = true;
            }
        }
        else if (touchPoint->state() == TouchPoint::TOUCH_STATE_ENDED) {
            // Erase touch point from TouchIDPickingMap
            size_t numberOfErasedElements = touchPickObjs_.erase(touchPoint->getId());
            isAssociated = (numberOfErasedElements > 0);
        }
        else {
            // Find out if touch point is in the TouchIDPickingMap
            // If it exists, associate touch point with picking object
            touchPickObjs_it = touchPickObjs_.find(touchPoint->getId());
            if (touchPickObjs_it != touchPickObjs_.end()){
                // Associate touch point with picking object
                // which can already have other associated touch points.
                pickedTouchPoints_it = pickedTouchPoints_.find(touchPickObjs_it->second);
                if (pickedTouchPoints_it != pickedTouchPoints_.end()){
                    pickedTouchPoints_it->second.push_back(*touchPoint);
                }
                else{
                    pickedTouchPoints_.emplace(touchPickObjs_it->second, std::vector<TouchPoint>{*touchPoint});
                }
                isAssociated = true;
            }
        }
        // Remove the touch point from the event if it was associated with a picking object.
        if (isAssociated)
            touchPoint = touchPoints.erase(touchPoint);
        else
            ++touchPoint;
    }

    // Build touch event for all picking objects with associated touch points
    for (pickedTouchPoints_it = pickedTouchPoints_.begin(); pickedTouchPoints_it != pickedTouchPoints_.end(); ++pickedTouchPoints_it){
        // Treat a single touch point the same as a mouse event, for now.
        if (pickedTouchPoints_it->second.size() == 1){
            uvec2 coord = mousePosToPixelCoordinates(pickedTouchPoints_it->second[0].getPos(), e->canvasSize());
            if (pickedTouchPoints_it->second[0].state() & TouchPoint::TOUCH_STATE_STARTED){
                pickedTouchPoints_it->first->setPickingPosition(normalizedCoordinates(coord));
                pickedTouchPoints_it->first->setPickingDepth(pickedTouchPoints_it->second[0].getDepth());
                pickedTouchPoints_it->first->setPickingMove(vec2(0.f, 0.f));
            }
            else{
                uvec2 prevCoord = mousePosToPixelCoordinates(pickedTouchPoints_it->second[0].getPrevPos(), e->canvasSize());
                pickedTouchPoints_it->first->setPickingMove(pixelMoveVector(prevCoord, coord));
            }
            // A single touch point is currently also treated as a mouse event,
            // so prepare for that here.
            prevMouseCoord_ = coord;
            mousePickObj_ = pickedTouchPoints_it->first;
            mousePickingOngoing_ = true;
        }

        pickedTouchPoints_it->first->setPickingTouchEvent(TouchEvent(pickedTouchPoints_it->second, e->canvasSize()));
    }

    // A single touch point is currently also treated as a mouse event,
    // so prepare for that here.
    if (touchPoints.size() == 1){
        prevMouseCoord_ = mousePosToPixelCoordinates(touchPoints[0].getPos(), e->canvasSize());
        touchPickingOn_ = false;
    }

    // Mark all picking objects in TouchIDPickingMap as picked.
    for (touchPickObjs_it = touchPickObjs_.begin(); touchPickObjs_it != touchPickObjs_.end(); ++touchPickObjs_it)
        touchPickObjs_it->second->picked();

    return !touchPickObjs_.empty();
}
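The while loop above relies on the erase-while-iterating idiom: std::vector::erase invalidates the erased position but returns an iterator to the next element, so the loop advances either by erasing or by incrementing, never both. A standalone illustration of the same pattern:

#include <iostream>
#include <vector>

int main()
{
    std::vector<int> points{1, 2, 3, 4, 5};
    auto it = points.begin();
    while (it != points.end())
    {
        if (*it % 2 == 0)
            it = points.erase(it);  // "associated" case: remove and continue from the next element
        else
            ++it;                   // keep this element and advance
    }
    for (int p : points)
        std::cout << p << ' ';      // prints: 1 3 5
    return 0;
}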
Example #5
void RayTracer::Run()
{
    // Scene Setup -- Generate the camera and scene.
    std::shared_ptr<Camera> currentCamera = storedApplication->CreateCamera();
    std::shared_ptr<Scene> currentScene = storedApplication->CreateScene();
    std::shared_ptr<ColorSampler> currentSampler = storedApplication->CreateSampler();
    std::shared_ptr<Renderer> currentRenderer = storedApplication->CreateRenderer(currentScene, currentSampler);
    assert(currentScene && currentCamera && currentSampler && currentRenderer);

    currentSampler->InitializeSampler(storedApplication.get(), currentScene.get());

    // Scene preprocessing -- generate acceleration structures, etc.
    // After this call, we are guaranteed that the "acceleration" member of the scene and all scene objects within the scene will be non-NULL.
    currentScene->GenerateDefaultAccelerationData();
    currentScene->Finalize();

    currentRenderer->InitializeRenderer();

    // Prepare for Output
    const glm::vec2 currentResolution = storedApplication->GetImageOutputResolution();
    ImageWriter imageWriter(storedApplication->GetOutputFilename(), static_cast<int>(currentResolution.x), static_cast<int>(currentResolution.y));

    // Perform forward ray tracing
    const int maxSamplesPerPixel = storedApplication->GetSamplesPerPixel();
    assert(maxSamplesPerPixel >= 1);

    // For each pixel in the image:
    #pragma omp parallel for num_threads(16)
    for (int y = 0; y < static_cast<int>(currentResolution.y); ++y) {
        for (int x = 0; x < static_cast<int>(currentResolution.x); ++x) {
            // Per-sample lambda: computes one sample's color; the sampler averages the samples and SetPixelColor writes the result.
            imageWriter.SetPixelColor(currentSampler->ComputeSamplesAndColor(maxSamplesPerPixel, 2, [&](glm::vec3 inputSample) {
                const glm::vec3 minRange(-0.5f, -0.5f, 0.f);
                const glm::vec3 maxRange(0.5f, 0.5f, 0.f);
                const glm::vec3 sampleOffset = (maxSamplesPerPixel == 1) ? glm::vec3(0.f, 0.f, 0.f) : minRange + (maxRange - minRange) * inputSample;

                glm::vec2 normalizedCoordinates(static_cast<float>(x) + sampleOffset.x, static_cast<float>(y) + sampleOffset.y);
                normalizedCoordinates /= currentResolution;

                // Construct ray, send it out into the scene and see what we hit.
                std::shared_ptr<Ray> cameraRay = currentCamera->GenerateRayForNormalizedCoordinates(normalizedCoordinates, 
                                                    storedApplication->GetFocusPlane(),
                                                    storedApplication->GetAperture());
                assert(cameraRay);

                IntersectionState rayIntersection(storedApplication->GetMaxReflectionBounces(), storedApplication->GetMaxRefractionBounces());
                bool didHitScene = currentScene->Trace(cameraRay.get(), &rayIntersection);

                // Use the intersection data to compute the BRDF response.
                glm::vec3 sampleColor(0.f);  // Default to black so rays that miss the scene return a defined color.
                if (didHitScene) {
                    sampleColor = currentRenderer->ComputeSampleColor(rayIntersection, *cameraRay);
                }
                return sampleColor;
            }), x, y);
        }
    }

    // Apply post-processing steps (e.g. tone mapping).
    storedApplication->PerformImagePostprocessing(imageWriter);

    // Copy the HDR data into the bitmap that we will save; everything gets clamped to [0.0, 1.0].
    imageWriter.CopyHDRToBitmap();

    // Save image.
    imageWriter.SaveImage();
}
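The per-sample lambda shared by all three ray-tracing examples maps an input sample in [0, 1)^2 to a sub-pixel offset in [-0.5, 0.5)^2, centering the jitter on the pixel before dividing by the resolution. A standalone sketch of just that remapping:

#include <cstdio>

int main()
{
    // minRange + (maxRange - minRange) * s maps s in [0, 1) to [-0.5, 0.5),
    // matching the sampleOffset computation in the lambdas above.
    const float minRange = -0.5f;
    const float maxRange = 0.5f;
    const float inputSamples[] = {0.0f, 0.25f, 0.5f, 0.75f, 0.999f};

    for (float s : inputSamples)
    {
        const float offset = minRange + (maxRange - minRange) * s;
        std::printf("sample %.3f -> offset %+.3f\n", s, offset);
    }
    return 0;
}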