void GlobalAccumulationFramebuffer::develop_to_tile(
    Tile&               tile,
    const size_t        origin_x,
    const size_t        origin_y,
    const size_t        tile_x,
    const size_t        tile_y,
    const float         scale) const
{
    const size_t tile_width = tile.get_width();
    const size_t tile_height = tile.get_height();

    for (size_t y = 0; y < tile_height; ++y)
    {
        for (size_t x = 0; x < tile_width; ++x)
        {
            // Fetch the accumulated color for this pixel and apply the scaling factor.
            Color4f color;
            color.rgb() =
                get_pixel(
                    origin_x + x,
                    origin_y + y);
            color.rgb() *= scale;
            color.a = 1.0f;

            // Store the developed pixel into the output tile.
            tile.set_pixel(x, y, color);
        }
    }
}
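A hedged sketch of the develop loop that could sit above this method: each tile's pixel origin in the accumulation buffer is its tile index times the tile dimensions from CanvasProperties. The names frame, framebuffer and scale below, and the loop itself, are assumptions for illustration, not taken from the surrounding code.

// Hypothetical caller, for illustration only.
const CanvasProperties& props = frame.image().properties();

for (size_t ty = 0; ty < props.m_tile_count_y; ++ty)
{
    for (size_t tx = 0; tx < props.m_tile_count_x; ++tx)
    {
        framebuffer.develop_to_tile(
            frame.image().tile(tx, ty),
            tx * props.m_tile_width,        // pixel origin of the tile, x
            ty * props.m_tile_height,       // pixel origin of the tile, y
            tx,
            ty,
            scale);
    }
}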
auto_ptr<Image> compare(const Image& lhs, const Image& rhs, const IPixelOp& op)
{
    const CanvasProperties& lhs_props = lhs.properties();
    const CanvasProperties& rhs_props = rhs.properties();

    // Both images must have identical dimensions and channel counts.
    if (lhs_props.m_canvas_width != rhs_props.m_canvas_width ||
        lhs_props.m_canvas_height != rhs_props.m_canvas_height ||
        lhs_props.m_channel_count != rhs_props.m_channel_count)
        throw ExceptionNonMatchingImageCharacteristics();

    // Only RGBA images are supported.
    if (lhs_props.m_channel_count != 4)
        throw ExceptionUnsupportedChannelCount();

    auto_ptr<Image> output(new Image(lhs));

    for (size_t y = 0; y < lhs_props.m_canvas_height; ++y)
    {
        for (size_t x = 0; x < lhs_props.m_canvas_width; ++x)
        {
            Color4f lhs_color;
            lhs.get_pixel(x, y, lhs_color);

            Color4f rhs_color;
            rhs.get_pixel(x, y, rhs_color);

            // Combine the two RGB values through the pixel operation,
            // force full opacity, and clamp the result to [0, 1].
            Color4f result;
            result.rgb() = op(lhs_color.rgb(), rhs_color.rgb());
            result.a = 1.0f;

            output->set_pixel(x, y, saturate(result));
        }
    }

    return output;
}
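The exact IPixelOp interface is not shown above, so the sketch below infers it from the call site in compare(): a virtual call operator taking two Color3f values and returning a Color3f. The class name DiffPixelOp and that signature are assumptions for illustration, not part of the codebase.

// Minimal sketch of a pixel operation: per-channel absolute difference.
// Requires <cmath> for std::abs.
struct DiffPixelOp : public IPixelOp
{
    virtual Color3f operator()(const Color3f& lhs, const Color3f& rhs) const
    {
        return Color3f(
            std::abs(lhs[0] - rhs[0]),
            std::abs(lhs[1] - rhs[1]),
            std::abs(lhs[2] - rhs[2]));
    }
};

// Possible usage:
//   auto_ptr<Image> diff = compare(image_a, image_b, DiffPixelOp());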
void FrozenDisplayRenderer::capture()
{
    SamplingContext::RNGType rng;
    SamplingContext sampling_context(
        rng,
        m_sampling_mode,
        2,                  // number of dimensions
        0,                  // number of samples -- unknown
        0);                 // initial instance number

    size_t point_index = 0;

    for (size_t ty = 0; ty < m_frame_props.m_tile_count_y; ++ty)
    {
        for (size_t tx = 0; tx < m_frame_props.m_tile_count_x; ++tx)
        {
            const Tile& color_tile = m_color_image.tile(tx, ty);
            const Tile& depth_tile = m_depth_image.tile(tx, ty);
            const size_t tile_width = color_tile.get_width();
            const size_t tile_height = color_tile.get_height();

            for (size_t py = 0; py < tile_height; ++py)
            {
                for (size_t px = 0; px < tile_width; ++px)
                {
                    // Compute film point in NDC.
                    const Vector2d ndc =
                        m_frame.get_sample_position(tx, ty, px, py, 0.5, 0.5);

                    // Retrieve pixel depth.
                    const float depth = depth_tile.get_component<float>(px, py, 0);

                    if (depth >= 0.0f)
                    {
                        // Generate a world space ray going through that film point.
                        ShadingRay ray;
                        m_camera.spawn_ray(sampling_context, Dual2d(ndc), ray);

                        // Retrieve pixel color.
                        Color4f pixel;
                        color_tile.get_pixel(px, py, pixel);

                        // Compute and store world space point and color.
                        RenderPoint point;
                        point.m_position = Vector3f(ray.point_at(depth));
                        point.m_color = pixel.rgb();
                        m_points[point_index++] = point;
                    }
                }
            }
        }
    }
}
Color4f Frame::linear_rgb_to_frame(const Color4f& linear_rgb) const
{
    Color4f result;

    // Transform the input color to the color space of the frame.
    switch (impl->m_color_space)
    {
      case ColorSpaceLinearRGB:
        result = linear_rgb;
        break;

      case ColorSpaceSRGB:
        result.rgb() = fast_linear_rgb_to_srgb(linear_rgb.rgb());
        result.a = linear_rgb.a;
        break;

      case ColorSpaceCIEXYZ:
        result.rgb() = linear_rgb_to_ciexyz(linear_rgb.rgb());
        result.a = linear_rgb.a;
        break;

      default:
        assert(!"Invalid target color space.");
        result = linear_rgb;
        break;
    }

    // Clamp the color.
    // todo: mark clamped pixels in the diagnostic map.
    result = impl->m_clamping ? saturate(result) : clamp_to_zero(result);

    // Gamma-correct the color.
    if (impl->m_gamma_correct)
    {
        // todo: investigate the usage of fast_pow() for gamma correction.
        const float rcp_target_gamma = impl->m_rcp_target_gamma;
        result[0] = pow(result[0], rcp_target_gamma);
        result[1] = pow(result[1], rcp_target_gamma);
        result[2] = pow(result[2], rcp_target_gamma);
    }

    return result;
}
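For a concrete sense of the gamma-correction step, assume a target gamma of 2.2 (an illustrative value only; the actual gamma comes from the frame's settings). Then m_rcp_target_gamma is roughly 0.4545, and a mid-gray linear value of 0.5 is encoded as pow(0.5f, 1.0f / 2.2f), about 0.73, while 0.0 and 1.0 pass through unchanged since pow(0, g) == 0 and pow(1, g) == 1.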