Example #1
static PyObject *pygpu_offscreen_draw_view3d(BPy_GPUOffScreen *self, PyObject *args, PyObject *kwds)
{
    static const char *kwlist[] = {"scene", "view3d", "region", "projection_matrix", "modelview_matrix", NULL};

    MatrixObject *py_mat_modelview, *py_mat_projection;
    PyObject *py_scene, *py_region, *py_view3d;

    Scene *scene;
    View3D *v3d;
    ARegion *ar;
    GPUFX *fx;
    GPUFXSettings fx_settings;
    void *rv3d_mats;

    BPY_GPU_OFFSCREEN_CHECK_OBJ(self);

    if (!PyArg_ParseTupleAndKeywords(
                args, kwds, "OOOO&O&:draw_view3d", (char **)(kwlist),
                &py_scene, &py_view3d, &py_region,
                pygpu_offscreen_check_matrix, &py_mat_projection,
                pygpu_offscreen_check_matrix, &py_mat_modelview) ||
            (!(scene    = PyC_RNA_AsPointer(py_scene, "Scene")) ||
             !(v3d      = PyC_RNA_AsPointer(py_view3d, "SpaceView3D")) ||
             !(ar       = PyC_RNA_AsPointer(py_region, "Region"))))
    {
        return NULL;
    }

    fx = GPU_fx_compositor_create();

    fx_settings = v3d->fx_settings;  /* full copy */

    ED_view3d_draw_offscreen_init(scene, v3d);

    rv3d_mats = ED_view3d_mats_rv3d_backup(ar->regiondata);

    GPU_offscreen_bind(self->ofs, true); /* bind */

    ED_view3d_draw_offscreen(
        scene, v3d, ar, GPU_offscreen_width(self->ofs), GPU_offscreen_height(self->ofs),
        (float(*)[4])py_mat_modelview->matrix, (float(*)[4])py_mat_projection->matrix,
        false, true, true, "",
        fx, &fx_settings,
        self->ofs);

    GPU_fx_compositor_destroy(fx);
    GPU_offscreen_unbind(self->ofs, true); /* unbind */

    ED_view3d_mats_rv3d_restore(ar->regiondata, rv3d_mats);
    MEM_freeN(rv3d_mats);

    Py_RETURN_NONE;
}
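The bind/unbind pair in draw_view3d above is the same offscreen round trip used throughout these examples. A minimal sketch (not Blender source), assuming the two-argument GPU_offscreen_bind/GPU_offscreen_unbind API from this example and an existing GPUOffScreen `ofs` of size `sizex` x `sizey`:

/* Minimal sketch: render while the offscreen buffer is bound, then read the
 * result back into CPU memory. `ofs`, `sizex` and `sizey` are assumed to be
 * created elsewhere. */
unsigned char *rect = MEM_mallocN(sizex * sizey * sizeof(unsigned char) * 4, "offscreen rect");

GPU_offscreen_bind(ofs, true);                           /* save the current framebuffer */
/* ... issue draw calls here ... */
GPU_offscreen_read_pixels(ofs, GL_UNSIGNED_BYTE, rect);  /* RGBA bytes */
GPU_offscreen_unbind(ofs, true);                         /* restore the saved framebuffer */

/* ... use `rect` ... */
MEM_freeN(rect);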
Example #2
static void gpu_fx_bind_render_target(int *passes_left, GPUFX *fx, struct GPUOffScreen *ofs, GPUTexture *target)
{
	if ((*passes_left)-- == 1) {
		GPU_framebuffer_texture_unbind(fx->gbuffer, NULL);
		if (ofs) {
			GPU_offscreen_bind(ofs, false);
		}
		else
			GPU_framebuffer_restore();
	}
	else {
		/* bind the ping buffer to the color buffer */
		GPU_framebuffer_texture_attach(fx->gbuffer, target, 0, NULL);
	}
}
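gpu_fx_bind_render_target decrements *passes_left on every call: while more passes remain it attaches `target` as the intermediate color buffer, and on the final pass (when the counter has reached 1) it unbinds the gbuffer and routes output to the offscreen buffer, or back to the restored window framebuffer when `ofs` is NULL. A hypothetical driver loop, sketched under the assumption that `fx`, `ofs` and `target` are set up by the caller (a real caller also ping-pongs source and target textures between passes, as GPU_fx_do_composite_pass further down this page does):

/* Hypothetical sketch, not Blender source: run `num_passes` fullscreen passes,
 * letting gpu_fx_bind_render_target pick the destination of each one. */
static void fx_run_passes_sketch(GPUFX *fx, struct GPUOffScreen *ofs, GPUTexture *target, int num_passes)
{
	int passes_left = num_passes;
	int i;

	for (i = 0; i < num_passes; i++) {
		/* intermediate passes render into `target`; the last pass goes to `ofs`
		 * (or the restored default framebuffer when `ofs` is NULL) */
		gpu_fx_bind_render_target(&passes_left, fx, ofs, target);

		/* bind the pass shader and its input textures here, then draw */
		glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);

		if (passes_left > 0) {
			/* an intermediate target was attached; detach it so the next pass can rebind it */
			GPU_framebuffer_texture_detach(target);
		}
	}
}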
Example #3
/* Transform buffer from role to scene linear space using GLSL OCIO conversion
 *
 * See IMB_colormanagement_setup_transform_from_role_glsl description for
 * some more details
 *
 * NOTE: this only works for RGBA buffers!
 */
int glaBufferTransformFromRole_glsl(float *buffer, int width, int height, int role)
{
	GPUOffScreen *ofs;
	char err_out[256];
	rcti display_rect;

	ofs = GPU_offscreen_create(width, height, err_out);

	if (!ofs)
		return FALSE;

	GPU_offscreen_bind(ofs);

	if (!IMB_colormanagement_setup_transform_from_role_glsl(role, TRUE)) {
		GPU_offscreen_unbind(ofs);
		GPU_offscreen_free(ofs);
		return FALSE;
	}

	BLI_rcti_init(&display_rect, 0, width, 0, height);

	glMatrixMode(GL_PROJECTION);
	glPushMatrix();
	glMatrixMode(GL_MODELVIEW);
	glPushMatrix();

	glaDefine2DArea(&display_rect);

	glaDrawPixelsTex(0, 0, width, height, GL_RGBA, GL_FLOAT,
	                 GL_NEAREST, buffer);

	glMatrixMode(GL_PROJECTION);
	glPopMatrix();
	glMatrixMode(GL_MODELVIEW);
	glPopMatrix();

	GPU_offscreen_read_pixels(ofs, GL_FLOAT, buffer);

	IMB_colormanagement_finish_glsl_transform();

	/* unbind */
	GPU_offscreen_unbind(ofs);
	GPU_offscreen_free(ofs);

	return TRUE;
}
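As the comment above says, this helper only works on RGBA float buffers, and it returns FALSE when the offscreen buffer or the OCIO GLSL transform cannot be set up. A usage sketch; the COLOR_ROLE_SCENE_LINEAR role and the fallback comment are illustrative assumptions, not taken from this page:

/* Usage sketch: transform a width*height RGBA float buffer on the GPU,
 * with a placeholder for a CPU fallback when the GLSL path fails. */
float *rect = MEM_mallocN(sizeof(float) * 4 * width * height, "rgba rect");

/* ... fill `rect` with RGBA float pixels ... */

if (!glaBufferTransformFromRole_glsl(rect, width, height, COLOR_ROLE_SCENE_LINEAR)) {
	/* GLSL path unavailable; do a CPU-side color transform here instead */
}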
Example #4
static PyObject *pygpu_offscreen_bind(BPy_GPUOffScreen *self, PyObject *args, PyObject *kwds)
{
    static const char *kwlist[] = {"save", NULL};
    bool save = true;

    BPY_GPU_OFFSCREEN_CHECK_OBJ(self);

    if (!PyArg_ParseTupleAndKeywords(
                args, kwds, "|O&:bind", (char **)(kwlist),
                PyC_ParseBool, &save))
    {
        return NULL;
    }

    GPU_offscreen_bind(self->ofs, save);
    Py_RETURN_NONE;
}
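The wrapper above (and Example #5 below) guards against a freed offscreen with BPY_GPU_OFFSCREEN_CHECK_OBJ before touching self->ofs. A hypothetical sketch of such a guard macro; the real definition lives in Blender's gpu Python module sources and may differ:

/* Hypothetical guard (sketch only): raise a Python exception and bail out
 * when the underlying GPUOffScreen has already been freed. */
#define BPY_GPU_OFFSCREEN_CHECK_OBJ_SKETCH(bpygpu) \
    { \
        if ((bpygpu)->ofs == NULL) { \
            PyErr_SetString(PyExc_ReferenceError, \
                            "GPU offscreen was freed, no further access is valid"); \
            return NULL; \
        } \
    } (void)0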
Example #5
static PyObject *bpygpu_offscreen_bind(BPyGPUOffScreen *self, PyObject *args, PyObject *kwds)
{
  BPY_GPU_OFFSCREEN_CHECK_OBJ(self);
  bool save = true;

  static const char *_keywords[] = {"save", NULL};
  static _PyArg_Parser _parser = {"|O&:bind", _keywords, 0};
  if (!_PyArg_ParseTupleAndKeywordsFast(args, kwds, &_parser, PyC_ParseBool, &save)) {
    return NULL;
  }

  GPU_offscreen_bind(self->ofs, save);

  self->is_saved = save;
  Py_INCREF(self);

  return (PyObject *)self;
}
Example #6
static void wm_draw_region_bind(ARegion *ar, int view)
{
  if (!ar->draw_buffer) {
    return;
  }

  if (ar->draw_buffer->viewport[view]) {
    GPU_viewport_bind(ar->draw_buffer->viewport[view], &ar->winrct);
  }
  else {
    GPU_offscreen_bind(ar->draw_buffer->offscreen[view], false);

    /* For now scissor is expected by region drawing, we could disable it
     * and do the enable/disable in the specific cases that set up scissor. */
    glEnable(GL_SCISSOR_TEST);
    glScissor(0, 0, ar->winx, ar->winy);
  }

  ar->draw_buffer->bound_view = view;
}
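Binding is paired with a matching unbind once the region has been drawn. The function below is a hypothetical counterpart (a sketch, not the actual Blender function), assuming a GPU_viewport_unbind() that mirrors GPU_viewport_bind(); it releases whichever buffer wm_draw_region_bind selected.

/* Hypothetical counterpart to wm_draw_region_bind (sketch only). */
static void wm_draw_region_unbind_sketch(ARegion *ar, int view)
{
  if (!ar->draw_buffer) {
    return;
  }

  ar->draw_buffer->bound_view = -1;

  if (ar->draw_buffer->viewport[view]) {
    GPU_viewport_unbind(ar->draw_buffer->viewport[view]);
  }
  else {
    glDisable(GL_SCISSOR_TEST);
    GPU_offscreen_unbind(ar->draw_buffer->offscreen[view], false);
  }
}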
Example #7
static PyObject *bpygpu_offscreen_draw_view3d(BPyGPUOffScreen *self,
                                              PyObject *args,
                                              PyObject *kwds)
{
  MatrixObject *py_mat_view, *py_mat_projection;
  PyObject *py_scene, *py_view_layer, *py_region, *py_view3d;

  struct Depsgraph *depsgraph;
  struct Scene *scene;
  struct ViewLayer *view_layer;
  View3D *v3d;
  ARegion *ar;
  struct RV3DMatrixStore *rv3d_mats;

  BPY_GPU_OFFSCREEN_CHECK_OBJ(self);

  static const char *_keywords[] = {
      "scene", "view_layer", "view3d", "region", "view_matrix", "projection_matrix", NULL};

  static _PyArg_Parser _parser = {"OOOOO&O&:draw_view3d", _keywords, 0};
  if (!_PyArg_ParseTupleAndKeywordsFast(args,
                                        kwds,
                                        &_parser,
                                        &py_scene,
                                        &py_view_layer,
                                        &py_view3d,
                                        &py_region,
                                        Matrix_Parse4x4,
                                        &py_mat_view,
                                        Matrix_Parse4x4,
                                        &py_mat_projection) ||
      (!(scene = PyC_RNA_AsPointer(py_scene, "Scene")) ||
       !(view_layer = PyC_RNA_AsPointer(py_view_layer, "ViewLayer")) ||
       !(v3d = PyC_RNA_AsPointer(py_view3d, "SpaceView3D")) ||
       !(ar = PyC_RNA_AsPointer(py_region, "Region")))) {
    return NULL;
  }

  BLI_assert(BKE_id_is_in_global_main(&scene->id));

  depsgraph = BKE_scene_get_depsgraph(scene, view_layer, true);

  rv3d_mats = ED_view3d_mats_rv3d_backup(ar->regiondata);

  GPU_offscreen_bind(self->ofs, true);

  ED_view3d_draw_offscreen(depsgraph,
                           scene,
                           v3d->shading.type,
                           v3d,
                           ar,
                           GPU_offscreen_width(self->ofs),
                           GPU_offscreen_height(self->ofs),
                           (float(*)[4])py_mat_view->matrix,
                           (float(*)[4])py_mat_projection->matrix,
                           false,
                           true,
                           "",
                           true,
                           self->ofs,
                           NULL);

  GPU_offscreen_unbind(self->ofs, true);

  ED_view3d_mats_rv3d_restore(ar->regiondata, rv3d_mats);
  MEM_freeN(rv3d_mats);

  Py_RETURN_NONE;
}
Example #8
bool GPU_fx_do_composite_pass(GPUFX *fx, float projmat[4][4], bool is_persp, struct Scene *scene, struct GPUOffScreen *ofs)
{
	GPUTexture *src, *target;
	int numslots = 0;
	float invproj[4][4];
	int i;
	/* number of passes left. when there are no more passes, the result is passed to the framebuffer */
	int passes_left = fx->num_passes;
	/* view vectors for the corners of the view frustum. Can be used to recreate the world space position easily */
	float viewvecs[3][4] = {
	    {-1.0f, -1.0f, -1.0f, 1.0f},
	    {1.0f, -1.0f, -1.0f, 1.0f},
	    {-1.0f, 1.0f, -1.0f, 1.0f}
	};

	if (fx->effects == 0)
		return false;

	/* first, unbind the render-to-texture framebuffer */
	GPU_framebuffer_texture_detach(fx->color_buffer);
	GPU_framebuffer_texture_detach(fx->depth_buffer);

	if (fx->restore_stencil)
		glPopAttrib();

	src = fx->color_buffer;
	target = fx->color_buffer_sec;

	/* set up quad buffer */
	glVertexPointer(2, GL_FLOAT, 0, fullscreencos);
	glTexCoordPointer(2, GL_FLOAT, 0, fullscreenuvs);
	glEnableClientState(GL_VERTEX_ARRAY);
	glEnableClientState(GL_TEXTURE_COORD_ARRAY);

	/* full screen FX pass */

	/* invert the view matrix */
	invert_m4_m4(invproj, projmat);

	/* convert the view vectors to view space */
	for (i = 0; i < 3; i++) {
		mul_m4_v4(invproj, viewvecs[i]);
		/* normalized trick see http://www.derschmale.com/2014/01/26/reconstructing-positions-from-the-depth-buffer */
		mul_v3_fl(viewvecs[i], 1.0f / viewvecs[i][3]);
		if (is_persp)
			mul_v3_fl(viewvecs[i], 1.0f / viewvecs[i][2]);
		viewvecs[i][3] = 1.0;
	}

	/* we need to store the differences */
	viewvecs[1][0] -= viewvecs[0][0];
	viewvecs[1][1] = viewvecs[2][1] - viewvecs[0][1];

	/* calculate a depth offset as well */
	if (!is_persp) {
		float vec_far[] = {-1.0f, -1.0f, 1.0f, 1.0f};
		mul_m4_v4(invproj, vec_far);
		mul_v3_fl(vec_far, 1.0f / vec_far[3]);
		viewvecs[1][2] = vec_far[2] - viewvecs[0][2];
	}

	/* set invalid color in case shader fails */
	glColor3f(1.0, 0.0, 1.0);
	glDisable(GL_DEPTH_TEST);

	/* ssao pass */
	if (fx->effects & GPU_FX_FLAG_SSAO) {
		GPUShader *ssao_shader;
		ssao_shader = GPU_shader_get_builtin_fx_shader(GPU_SHADER_FX_SSAO, is_persp);
		if (ssao_shader) {
			const GPUSSAOSettings *fx_ssao = fx->settings.ssao;
			int color_uniform, depth_uniform;
			int ssao_uniform, ssao_color_uniform, viewvecs_uniform, ssao_sample_params_uniform;
			int ssao_jitter_uniform, ssao_concentric_tex;
			float ssao_params[4] = {fx_ssao->distance_max, fx_ssao->factor, fx_ssao->attenuation, 0.0f};
			float sample_params[4];

			sample_params[0] = fx->ssao_sample_count;
			/* multiplier so we tile the random texture on screen */
			sample_params[2] = fx->gbuffer_dim[0] / 64.0;
			sample_params[3] = fx->gbuffer_dim[1] / 64.0;

			ssao_uniform = GPU_shader_get_uniform(ssao_shader, "ssao_params");
			ssao_color_uniform = GPU_shader_get_uniform(ssao_shader, "ssao_color");
			color_uniform = GPU_shader_get_uniform(ssao_shader, "colorbuffer");
			depth_uniform = GPU_shader_get_uniform(ssao_shader, "depthbuffer");
			viewvecs_uniform = GPU_shader_get_uniform(ssao_shader, "viewvecs");
			ssao_sample_params_uniform = GPU_shader_get_uniform(ssao_shader, "ssao_sample_params");
			ssao_concentric_tex = GPU_shader_get_uniform(ssao_shader, "ssao_concentric_tex");
			ssao_jitter_uniform = GPU_shader_get_uniform(ssao_shader, "jitter_tex");

			GPU_shader_bind(ssao_shader);

			GPU_shader_uniform_vector(ssao_shader, ssao_uniform, 4, 1, ssao_params);
			GPU_shader_uniform_vector(ssao_shader, ssao_color_uniform, 4, 1, fx_ssao->color);
			GPU_shader_uniform_vector(ssao_shader, viewvecs_uniform, 4, 3, viewvecs[0]);
			GPU_shader_uniform_vector(ssao_shader, ssao_sample_params_uniform, 4, 1, sample_params);

			GPU_texture_bind(src, numslots++);
			GPU_shader_uniform_texture(ssao_shader, color_uniform, src);

			GPU_texture_bind(fx->depth_buffer, numslots++);
			GPU_depth_texture_mode(fx->depth_buffer, false, true);
			GPU_shader_uniform_texture(ssao_shader, depth_uniform, fx->depth_buffer);

			GPU_texture_bind(fx->jitter_buffer, numslots++);
			GPU_shader_uniform_texture(ssao_shader, ssao_jitter_uniform, fx->jitter_buffer);

			GPU_texture_bind(fx->ssao_concentric_samples_tex, numslots++);
			GPU_shader_uniform_texture(ssao_shader, ssao_concentric_tex, fx->ssao_concentric_samples_tex);

			/* draw */
			gpu_fx_bind_render_target(&passes_left, fx, ofs, target);

			glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);

			/* disable bindings */
			GPU_texture_unbind(src);
			GPU_depth_texture_mode(fx->depth_buffer, true, false);
			GPU_texture_unbind(fx->depth_buffer);
			GPU_texture_unbind(fx->jitter_buffer);
			GPU_texture_unbind(fx->ssao_concentric_samples_tex);

			/* may not be attached, in that case this just returns */
			if (target) {
				GPU_framebuffer_texture_detach(target);
				if (ofs) {
					GPU_offscreen_bind(ofs, false);
				}
				else {
					GPU_framebuffer_restore();
				}
			}

			/* swap here, after src/target have been unbound */
			SWAP(GPUTexture *, target, src);
			numslots = 0;
		}
	}

	/* second pass, dof */
	if (fx->effects & GPU_FX_FLAG_DOF) {
		const GPUDOFSettings *fx_dof = fx->settings.dof;
		GPUShader *dof_shader_pass1, *dof_shader_pass2, *dof_shader_pass3, *dof_shader_pass4, *dof_shader_pass5;
		float dof_params[4];
		float scale = scene->unit.system ? scene->unit.scale_length : 1.0f;
		/* this is the factor that converts to the scene scale: focal length and sensor are expressed in mm.
		 * unit.scale_length is how many meters per blender unit we have. We want to convert to blender units though,
		 * because the shader reads coordinates in world space, which is in blender units. */
		float scale_camera = 0.001f / scale;
		/* we want the radius here for the aperture number */
		float aperture = 0.5f * scale_camera * fx_dof->focal_length / fx_dof->fstop;

		dof_params[0] = aperture * fabsf(scale_camera * fx_dof->focal_length / ((fx_dof->focus_distance / scale) - scale_camera * fx_dof->focal_length));
		dof_params[1] = fx_dof->focus_distance / scale;
		dof_params[2] = fx->gbuffer_dim[0] / (scale_camera * fx_dof->sensor);
		dof_params[3] = 0.0f;

		/* The DOF effect has many passes, but most of them are performed on a texture whose dimensions are a quarter of the original
		 * in each axis (16 times fewer pixels than the original screen resolution). The technique used is not very exact but should be
		 * fast enough; it is based on "Practical Post-Process Depth of Field", see http://http.developer.nvidia.com/GPUGems3/gpugems3_ch28.html */
		dof_shader_pass1 = GPU_shader_get_builtin_fx_shader(GPU_SHADER_FX_DEPTH_OF_FIELD_PASS_ONE, is_persp);
		dof_shader_pass2 = GPU_shader_get_builtin_fx_shader(GPU_SHADER_FX_DEPTH_OF_FIELD_PASS_TWO, is_persp);
		dof_shader_pass3 = GPU_shader_get_builtin_fx_shader(GPU_SHADER_FX_DEPTH_OF_FIELD_PASS_THREE, is_persp);
		dof_shader_pass4 = GPU_shader_get_builtin_fx_shader(GPU_SHADER_FX_DEPTH_OF_FIELD_PASS_FOUR, is_persp);
		dof_shader_pass5 = GPU_shader_get_builtin_fx_shader(GPU_SHADER_FX_DEPTH_OF_FIELD_PASS_FIVE, is_persp);

		/* error occurred, restore framebuffers and return */
		if (!(dof_shader_pass1 && dof_shader_pass2 && dof_shader_pass3 && dof_shader_pass4 && dof_shader_pass5)) {
			GPU_framebuffer_texture_unbind(fx->gbuffer, NULL);
			GPU_framebuffer_restore();
			return false;
		}

		/* pass first, first level of blur in low res buffer */
		{
			int invrendertargetdim_uniform, color_uniform, depth_uniform, dof_uniform;
			int viewvecs_uniform;

			float invrendertargetdim[2] = {1.0f / fx->gbuffer_dim[0], 1.0f / fx->gbuffer_dim[1]};

			dof_uniform = GPU_shader_get_uniform(dof_shader_pass1, "dof_params");
			invrendertargetdim_uniform = GPU_shader_get_uniform(dof_shader_pass1, "invrendertargetdim");
			color_uniform = GPU_shader_get_uniform(dof_shader_pass1, "colorbuffer");
			depth_uniform = GPU_shader_get_uniform(dof_shader_pass1, "depthbuffer");
			viewvecs_uniform = GPU_shader_get_uniform(dof_shader_pass1, "viewvecs");

			GPU_shader_bind(dof_shader_pass1);

			GPU_shader_uniform_vector(dof_shader_pass1, dof_uniform, 4, 1, dof_params);
			GPU_shader_uniform_vector(dof_shader_pass1, invrendertargetdim_uniform, 2, 1, invrendertargetdim);
			GPU_shader_uniform_vector(dof_shader_pass1, viewvecs_uniform, 4, 3, viewvecs[0]);

			GPU_texture_bind(src, numslots++);
			GPU_shader_uniform_texture(dof_shader_pass1, color_uniform, src);

			GPU_texture_bind(fx->depth_buffer, numslots++);
			GPU_depth_texture_mode(fx->depth_buffer, false, true);
			GPU_shader_uniform_texture(dof_shader_pass1, depth_uniform, fx->depth_buffer);

			/* target is the downsampled coc buffer */
			GPU_framebuffer_texture_attach(fx->gbuffer, fx->dof_near_coc_buffer, 0, NULL);
			/* binding takes care of setting the viewport to the downsampled size */
			GPU_texture_bind_as_framebuffer(fx->dof_near_coc_buffer);

			glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
			/* disable bindings */
			GPU_texture_unbind(src);
			GPU_depth_texture_mode(fx->depth_buffer, true, false);
			GPU_texture_unbind(fx->depth_buffer);

			GPU_framebuffer_texture_detach(fx->dof_near_coc_buffer);
			numslots = 0;
		}

		/* second pass, gaussian blur the downsampled image */
		{
			int invrendertargetdim_uniform, color_uniform, depth_uniform, dof_uniform;
			int viewvecs_uniform;
			float invrendertargetdim[2] = {1.0f / GPU_texture_opengl_width(fx->dof_near_coc_blurred_buffer),
			                               1.0f / GPU_texture_opengl_height(fx->dof_near_coc_blurred_buffer)};
			float tmp = invrendertargetdim[0];
			invrendertargetdim[0] = 0.0f;

			dof_params[2] = GPU_texture_opengl_width(fx->dof_near_coc_blurred_buffer) / (scale_camera * fx_dof->sensor);

			dof_uniform = GPU_shader_get_uniform(dof_shader_pass2, "dof_params");
			invrendertargetdim_uniform = GPU_shader_get_uniform(dof_shader_pass2, "invrendertargetdim");
			color_uniform = GPU_shader_get_uniform(dof_shader_pass2, "colorbuffer");
			depth_uniform = GPU_shader_get_uniform(dof_shader_pass2, "depthbuffer");
			viewvecs_uniform = GPU_shader_get_uniform(dof_shader_pass2, "viewvecs");

			/* Blurring vertically */
			GPU_shader_bind(dof_shader_pass2);

			GPU_shader_uniform_vector(dof_shader_pass2, dof_uniform, 4, 1, dof_params);
			GPU_shader_uniform_vector(dof_shader_pass2, invrendertargetdim_uniform, 2, 1, invrendertargetdim);
			GPU_shader_uniform_vector(dof_shader_pass2, viewvecs_uniform, 4, 3, viewvecs[0]);

			GPU_texture_bind(fx->depth_buffer, numslots++);
			GPU_depth_texture_mode(fx->depth_buffer, false, true);
			GPU_shader_uniform_texture(dof_shader_pass2, depth_uniform, fx->depth_buffer);

			GPU_texture_bind(fx->dof_near_coc_buffer, numslots++);
			GPU_shader_uniform_texture(dof_shader_pass2, color_uniform, fx->dof_near_coc_buffer);

			/* use final buffer as a temp here */
			GPU_framebuffer_texture_attach(fx->gbuffer, fx->dof_near_coc_final_buffer, 0, NULL);

			/* Drawing quad */
			glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);

			/* unbind/detach */
			GPU_texture_unbind(fx->dof_near_coc_buffer);
			GPU_framebuffer_texture_detach(fx->dof_near_coc_final_buffer);

			/* Blurring horizontally */
			invrendertargetdim[0] = tmp;
			invrendertargetdim[1] = 0.0f;
			GPU_shader_uniform_vector(dof_shader_pass2, invrendertargetdim_uniform, 2, 1, invrendertargetdim);

			GPU_texture_bind(fx->dof_near_coc_final_buffer, numslots++);
			GPU_shader_uniform_texture(dof_shader_pass2, color_uniform, fx->dof_near_coc_final_buffer);

			GPU_framebuffer_texture_attach(fx->gbuffer, fx->dof_near_coc_blurred_buffer, 0, NULL);
			glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);

			/* unbind/detach */
			GPU_depth_texture_mode(fx->depth_buffer, true, false);
			GPU_texture_unbind(fx->depth_buffer);

			GPU_texture_unbind(fx->dof_near_coc_final_buffer);
			GPU_framebuffer_texture_detach(fx->dof_near_coc_blurred_buffer);

			dof_params[2] = fx->gbuffer_dim[0] / (scale_camera * fx_dof->sensor);

			numslots = 0;
		}

		/* third pass, calculate near coc */
		{
			int near_coc_downsampled, near_coc_blurred;

			near_coc_downsampled = GPU_shader_get_uniform(dof_shader_pass3, "colorbuffer");
			near_coc_blurred = GPU_shader_get_uniform(dof_shader_pass3, "blurredcolorbuffer");

			GPU_shader_bind(dof_shader_pass3);

			GPU_texture_bind(fx->dof_near_coc_buffer, numslots++);
			GPU_shader_uniform_texture(dof_shader_pass3, near_coc_downsampled, fx->dof_near_coc_buffer);

			GPU_texture_bind(fx->dof_near_coc_blurred_buffer, numslots++);
			GPU_shader_uniform_texture(dof_shader_pass3, near_coc_blurred, fx->dof_near_coc_blurred_buffer);

			GPU_framebuffer_texture_attach(fx->gbuffer, fx->dof_near_coc_final_buffer, 0, NULL);

			glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
			/* disable bindings */
			GPU_texture_unbind(fx->dof_near_coc_buffer);
			GPU_texture_unbind(fx->dof_near_coc_blurred_buffer);

			/* unbinding here restores the size to the original */
			GPU_framebuffer_texture_detach(fx->dof_near_coc_final_buffer);

			numslots = 0;
		}

		/* fourth pass blur final coc once to eliminate discontinuities */
		{
			int near_coc_downsampled;
			int invrendertargetdim_uniform;
			float invrendertargetdim[2] = {1.0f / GPU_texture_opengl_width(fx->dof_near_coc_blurred_buffer),
			                               1.0f / GPU_texture_opengl_height(fx->dof_near_coc_blurred_buffer)};

			near_coc_downsampled = GPU_shader_get_uniform(dof_shader_pass4, "colorbuffer");
			invrendertargetdim_uniform = GPU_shader_get_uniform(dof_shader_pass4, "invrendertargetdim");

			GPU_shader_bind(dof_shader_pass4);

			GPU_texture_bind(fx->dof_near_coc_final_buffer, numslots++);
			GPU_shader_uniform_texture(dof_shader_pass4, near_coc_downsampled, fx->dof_near_coc_final_buffer);
			GPU_shader_uniform_vector(dof_shader_pass4, invrendertargetdim_uniform, 2, 1, invrendertargetdim);

			GPU_framebuffer_texture_attach(fx->gbuffer, fx->dof_near_coc_buffer, 0, NULL);

			glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
			/* disable bindings */
			GPU_texture_unbind(fx->dof_near_coc_final_buffer);

			/* unbinding here restores the size to the original */
			GPU_framebuffer_texture_unbind(fx->gbuffer, fx->dof_near_coc_buffer);
			GPU_framebuffer_texture_detach(fx->dof_near_coc_buffer);

			numslots = 0;
		}

		/* final pass, merge blurred layers according to final calculated coc */
		{
			int medium_blurred_uniform, high_blurred_uniform, original_uniform, depth_uniform, dof_uniform;
			int invrendertargetdim_uniform, viewvecs_uniform;
			float invrendertargetdim[2] = {1.0f / fx->gbuffer_dim[0], 1.0f / fx->gbuffer_dim[1]};

			medium_blurred_uniform = GPU_shader_get_uniform(dof_shader_pass5, "mblurredcolorbuffer");
			high_blurred_uniform = GPU_shader_get_uniform(dof_shader_pass5, "blurredcolorbuffer");
			dof_uniform = GPU_shader_get_uniform(dof_shader_pass5, "dof_params");
			invrendertargetdim_uniform = GPU_shader_get_uniform(dof_shader_pass5, "invrendertargetdim");
			original_uniform = GPU_shader_get_uniform(dof_shader_pass5, "colorbuffer");
			depth_uniform = GPU_shader_get_uniform(dof_shader_pass5, "depthbuffer");
			viewvecs_uniform = GPU_shader_get_uniform(dof_shader_pass5, "viewvecs");

			GPU_shader_bind(dof_shader_pass5);

			GPU_shader_uniform_vector(dof_shader_pass5, dof_uniform, 4, 1, dof_params);
			GPU_shader_uniform_vector(dof_shader_pass5, invrendertargetdim_uniform, 2, 1, invrendertargetdim);
			GPU_shader_uniform_vector(dof_shader_pass5, viewvecs_uniform, 4, 3, viewvecs[0]);

			GPU_texture_bind(src, numslots++);
			GPU_shader_uniform_texture(dof_shader_pass5, original_uniform, src);

			GPU_texture_bind(fx->dof_near_coc_blurred_buffer, numslots++);
			GPU_shader_uniform_texture(dof_shader_pass5, high_blurred_uniform, fx->dof_near_coc_blurred_buffer);

			GPU_texture_bind(fx->dof_near_coc_buffer, numslots++);
			GPU_shader_uniform_texture(dof_shader_pass5, medium_blurred_uniform, fx->dof_near_coc_buffer);

			GPU_texture_bind(fx->depth_buffer, numslots++);
			GPU_depth_texture_mode(fx->depth_buffer, false, true);
			GPU_shader_uniform_texture(dof_shader_pass5, depth_uniform, fx->depth_buffer);

			/* if this is the last pass, prepare for rendering on the framebuffer */
			gpu_fx_bind_render_target(&passes_left, fx, ofs, target);

			glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
			/* disable bindings */
			GPU_texture_unbind(fx->dof_near_coc_buffer);
			GPU_texture_unbind(fx->dof_near_coc_blurred_buffer);
			GPU_texture_unbind(src);
			GPU_depth_texture_mode(fx->depth_buffer, true, false);
			GPU_texture_unbind(fx->depth_buffer);

			/* may not be attached, in that case this just returns */
			if (target) {
				GPU_framebuffer_texture_detach(target);
				if (ofs) {
					GPU_offscreen_bind(ofs, false);
				}
				else {
					GPU_framebuffer_restore();
				}
			}

			SWAP(GPUTexture *, target, src);
			numslots = 0;
		}
	}

	glDisableClientState(GL_VERTEX_ARRAY);
	glDisableClientState(GL_TEXTURE_COORD_ARRAY);

	GPU_shader_unbind();

	return true;
}
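A sketch of a typical call site for GPU_fx_do_composite_pass (an assumption based on the signature above, not copied from Blender): the compositing passes run after the viewport has been rendered into the fx color/depth buffers, writing the result into `ofs` when drawing offscreen or back to the window framebuffer when `ofs` is NULL.

/* Call-site sketch: `rv3d`, `scene` and `ofs` are assumed to come from the
 * surrounding viewport drawing code; rv3d->winmat and rv3d->is_persp are the
 * region's projection matrix and perspective flag. */
if (fx) {
	GPU_fx_do_composite_pass(fx, rv3d->winmat, rv3d->is_persp, scene, ofs);
}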
Example #9
static void screen_opengl_render_doit(OGLRender *oglrender, RenderResult *rr)
{
	Scene *scene = oglrender->scene;
	ARegion *ar = oglrender->ar;
	View3D *v3d = oglrender->v3d;
	RegionView3D *rv3d = oglrender->rv3d;
	Object *camera = NULL;
	ImBuf *ibuf;
	float winmat[4][4];
	int sizex = oglrender->sizex;
	int sizey = oglrender->sizey;
	const short view_context = (v3d != NULL);
	bool draw_bgpic = true;
	bool draw_sky = (scene->r.alphamode == R_ADDSKY);
	unsigned char *rect = NULL;
	const char *viewname = RE_GetActiveRenderView(oglrender->re);

	if (oglrender->is_sequencer) {
		SeqRenderData context;
		SpaceSeq *sseq = oglrender->sseq;
		int chanshown = sseq ? sseq->chanshown : 0;
		struct bGPdata *gpd = (sseq && (sseq->flag & SEQ_SHOW_GPENCIL)) ? sseq->gpd : NULL;

		BKE_sequencer_new_render_data(
		        oglrender->bmain->eval_ctx, oglrender->bmain, scene,
		        oglrender->sizex, oglrender->sizey, 100.0f,
		        &context);

		context.view_id = BKE_scene_multiview_view_id_get(&scene->r, viewname);
		ibuf = BKE_sequencer_give_ibuf(&context, CFRA, chanshown);

		if (ibuf) {
			float *rectf;
			ImBuf *linear_ibuf;

			BLI_assert((oglrender->sizex == ibuf->x) && (oglrender->sizey == ibuf->y));

			linear_ibuf = IMB_dupImBuf(ibuf);
			IMB_freeImBuf(ibuf);

			if (linear_ibuf->rect_float == NULL) {
				/* internally the sequencer works in display space and stores both byte and float buffers in that space.
				 * It is possible that the byte->float conversion didn't happen in the sequencer (e.g. when adding an image
				 * sequence/movie into the sequencer), so there will only be a byte buffer. Create a float buffer from the
				 * existing byte buffer, making it linear.
				 */

				IMB_float_from_rect(linear_ibuf);
			}
			else {
				/* ensure float buffer is in linear space, not in display space */
				BKE_sequencer_imbuf_from_sequencer_space(scene, linear_ibuf);
			}

			rectf = RE_RenderViewGetRectf(rr, oglrender->view_id);
			memcpy(rectf, linear_ibuf->rect_float, sizeof(float) * 4 * oglrender->sizex * oglrender->sizey);

			IMB_freeImBuf(linear_ibuf);
		}

		if (gpd) {
			int i;
			unsigned char *gp_rect;

			GPU_offscreen_bind(oglrender->ofs, true);

			glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
			glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

			wmOrtho2(0, sizex, 0, sizey);
			glTranslatef(sizex / 2, sizey / 2, 0.0f);

			G.f |= G_RENDER_OGL;
			ED_gpencil_draw_ex(scene, gpd, sizex, sizey, scene->r.cfra, SPACE_SEQ);
			G.f &= ~G_RENDER_OGL;

			gp_rect = MEM_mallocN(sizex * sizey * sizeof(unsigned char) * 4, "offscreen rect");
			GPU_offscreen_read_pixels(oglrender->ofs, GL_UNSIGNED_BYTE, gp_rect);

			for (i = 0; i < sizex * sizey * 4; i += 4) {
				float  col_src[4];
				rgba_uchar_to_float(col_src, &gp_rect[i]);
				blend_color_mix_float(&rr->rectf[i], &rr->rectf[i], col_src);
			}
			GPU_offscreen_unbind(oglrender->ofs, true);

			MEM_freeN(gp_rect);
		}
	}
	else if (view_context) {
		bool is_persp;
		/* full copy */
		GPUFXSettings fx_settings = v3d->fx_settings;

		ED_view3d_draw_offscreen_init(scene, v3d);

		GPU_offscreen_bind(oglrender->ofs, true); /* bind */

		/* render 3d view */
		if (rv3d->persp == RV3D_CAMOB && v3d->camera) {
			/*int is_ortho = scene->r.mode & R_ORTHO;*/
			camera = BKE_camera_multiview_render(oglrender->scene, v3d->camera, viewname);
			RE_GetCameraWindow(oglrender->re, camera, scene->r.cfra, winmat);
			if (camera->type == OB_CAMERA) {
				Camera *cam = camera->data;
				is_persp = cam->type == CAM_PERSP;
			}
			else
				is_persp = true;
			BKE_camera_to_gpu_dof(camera, &fx_settings);
		}
		else {
			rctf viewplane;
			float clipsta, clipend;

			bool is_ortho = ED_view3d_viewplane_get(v3d, rv3d, sizex, sizey, &viewplane, &clipsta, &clipend, NULL);
			if (is_ortho) orthographic_m4(winmat, viewplane.xmin, viewplane.xmax, viewplane.ymin, viewplane.ymax, -clipend, clipend);
			else perspective_m4(winmat, viewplane.xmin, viewplane.xmax, viewplane.ymin, viewplane.ymax, clipsta, clipend);

			is_persp = !is_ortho;
		}

		rect = MEM_mallocN(sizex * sizey * sizeof(unsigned char) * 4, "offscreen rect");

		if ((scene->r.mode & R_OSA) == 0) {
			ED_view3d_draw_offscreen(
			        scene, v3d, ar, sizex, sizey, NULL, winmat,
			        draw_bgpic, draw_sky, is_persp,
			        oglrender->ofs, oglrender->fx, &fx_settings, viewname);
			GPU_offscreen_read_pixels(oglrender->ofs, GL_UNSIGNED_BYTE, rect);
		}
		else {
			/* simple accumulation, less hassle than FSAA FBOs */
			static float jit_ofs[32][2];
			float winmat_jitter[4][4];
			int *accum_buffer = MEM_mallocN(sizex * sizey * sizeof(int) * 4, "accum1");
			int i, j;

			BLI_jitter_init(jit_ofs, scene->r.osa);

			/* first sample buffer, also initializes 'rv3d->persmat' */
			ED_view3d_draw_offscreen(
			        scene, v3d, ar, sizex, sizey, NULL, winmat,
			        draw_bgpic, draw_sky, is_persp,
			        oglrender->ofs, oglrender->fx, &fx_settings, viewname);
			GPU_offscreen_read_pixels(oglrender->ofs, GL_UNSIGNED_BYTE, rect);

			for (i = 0; i < sizex * sizey * 4; i++)
				accum_buffer[i] = rect[i];

			/* skip the first sample */
			for (j = 1; j < scene->r.osa; j++) {
				copy_m4_m4(winmat_jitter, winmat);
				window_translate_m4(winmat_jitter, rv3d->persmat,
				                    (jit_ofs[j][0] * 2.0f) / sizex,
				                    (jit_ofs[j][1] * 2.0f) / sizey);

				ED_view3d_draw_offscreen(
				        scene, v3d, ar, sizex, sizey, NULL, winmat_jitter,
				        draw_bgpic, draw_sky, is_persp,
				        oglrender->ofs, oglrender->fx, &fx_settings, viewname);
				GPU_offscreen_read_pixels(oglrender->ofs, GL_UNSIGNED_BYTE, rect);

				for (i = 0; i < sizex * sizey * 4; i++)
					accum_buffer[i] += rect[i];
			}

			for (i = 0; i < sizex * sizey * 4; i++)
				rect[i] = accum_buffer[i] / scene->r.osa;

			MEM_freeN(accum_buffer);
		}

		GPU_offscreen_unbind(oglrender->ofs, true); /* unbind */
	}
	else {
		/* shouldn't suddenly give errors mid-render, but it's possible */
		char err_out[256] = "unknown";
		ImBuf *ibuf_view = ED_view3d_draw_offscreen_imbuf_simple(scene, scene->camera, oglrender->sizex, oglrender->sizey,
		                                                         IB_rect, OB_SOLID, false, true, true,
		                                                         (draw_sky) ? R_ADDSKY : R_ALPHAPREMUL, viewname, err_out);
		camera = scene->camera;

		if (ibuf_view) {
			/* steal rect reference from ibuf */
			rect = (unsigned char *)ibuf_view->rect;
			ibuf_view->mall &= ~IB_rect;

			IMB_freeImBuf(ibuf_view);
		}
		else {
			fprintf(stderr, "%s: failed to get buffer, %s\n", __func__, err_out);
		}
	}

	/* note on color management:
	 *
	 * OpenGL renders into sRGB colors, but render buffers are expected to be
	 * linear, so we convert to linear here. That way the conversion back to bytes
	 * can make it sRGB (or another display space) again, and e.g. OpenEXR saving
	 * also saves the correct linear float buffer.
	 */

	if (rect) {
		int profile_to;
		float *rectf = RE_RenderViewGetRectf(rr, oglrender->view_id);
		
		if (BKE_scene_check_color_management_enabled(scene))
			profile_to = IB_PROFILE_LINEAR_RGB;
		else
			profile_to = IB_PROFILE_SRGB;

		/* the sequencer's trickier conversion already happened above;
		 * also assume OpenGL's space matches the byte buffer color space */
		IMB_buffer_float_from_byte(rectf, rect,
		                           profile_to, IB_PROFILE_SRGB, true,
		                           oglrender->sizex, oglrender->sizey, oglrender->sizex, oglrender->sizex);

		/* rr->rectf is now filled with image data */

		if ((scene->r.stamp & R_STAMP_ALL) && (scene->r.stamp & R_STAMP_DRAW))
			BKE_image_stamp_buf(scene, camera, rect, rectf, rr->rectx, rr->recty, 4);

		MEM_freeN(rect);
	}
}
Example #10
static void screen_opengl_render_apply(OGLRender *oglrender)
{
	Scene *scene = oglrender->scene;
	ARegion *ar = oglrender->ar;
	View3D *v3d = oglrender->v3d;
	RegionView3D *rv3d = oglrender->rv3d;
	RenderResult *rr;
	Object *camera = NULL;
	ImBuf *ibuf;
	void *lock;
	float winmat[4][4];
	int sizex = oglrender->sizex;
	int sizey = oglrender->sizey;
	const short view_context = (v3d != NULL);
	bool draw_bgpic = true;
	bool draw_sky = (scene->r.alphamode == R_ADDSKY);
	unsigned char *rect = NULL;

	rr = RE_AcquireResultRead(oglrender->re);

	if (oglrender->is_sequencer) {
		SeqRenderData context;
		int chanshown = oglrender->sseq ? oglrender->sseq->chanshown : 0;

		context = BKE_sequencer_new_render_data(oglrender->bmain, scene, oglrender->sizex, oglrender->sizey, 100.0f);

		ibuf = BKE_sequencer_give_ibuf(context, CFRA, chanshown);

		if (ibuf) {
			ImBuf *linear_ibuf;

			BLI_assert((oglrender->sizex == ibuf->x) && (oglrender->sizey == ibuf->y));

			linear_ibuf = IMB_dupImBuf(ibuf);
			IMB_freeImBuf(ibuf);

			if (linear_ibuf->rect_float == NULL) {
				/* internally the sequencer works in display space and stores both byte and float buffers in that space.
				 * It is possible that the byte->float conversion didn't happen in the sequencer (e.g. when adding an image
				 * sequence/movie into the sequencer), so there will only be a byte buffer. Create a float buffer from the
				 * existing byte buffer, making it linear.
				 */

				IMB_float_from_rect(linear_ibuf);
			}
			else {
				/* ensure float buffer is in linear space, not in display space */
				BKE_sequencer_imbuf_from_sequencer_space(scene, linear_ibuf);
			}

			memcpy(rr->rectf, linear_ibuf->rect_float, sizeof(float) * 4 * oglrender->sizex * oglrender->sizey);

			IMB_freeImBuf(linear_ibuf);
		}
	}
	else if (view_context) {
		ED_view3d_draw_offscreen_init(scene, v3d);

		GPU_offscreen_bind(oglrender->ofs); /* bind */

		/* render 3d view */
		if (rv3d->persp == RV3D_CAMOB && v3d->camera) {
			/*int is_ortho = scene->r.mode & R_ORTHO;*/
			camera = v3d->camera;
			RE_GetCameraWindow(oglrender->re, camera, scene->r.cfra, winmat);
			
		}
		else {
			rctf viewplane;
			float clipsta, clipend;

			int is_ortho = ED_view3d_viewplane_get(v3d, rv3d, sizex, sizey, &viewplane, &clipsta, &clipend, NULL);
			if (is_ortho) orthographic_m4(winmat, viewplane.xmin, viewplane.xmax, viewplane.ymin, viewplane.ymax, -clipend, clipend);
			else perspective_m4(winmat, viewplane.xmin, viewplane.xmax, viewplane.ymin, viewplane.ymax, clipsta, clipend);
		}

		rect = MEM_mallocN(sizex * sizey * sizeof(unsigned char) * 4, "offscreen rect");

		if ((scene->r.mode & R_OSA) == 0) {
			ED_view3d_draw_offscreen(scene, v3d, ar, sizex, sizey, NULL, winmat, draw_bgpic, draw_sky);
			GPU_offscreen_read_pixels(oglrender->ofs, GL_UNSIGNED_BYTE, rect);
		}
		else {
			/* simple accumulation, less hassle than FSAA FBOs */
			static float jit_ofs[32][2];
			float winmat_jitter[4][4];
			int *accum_buffer = MEM_mallocN(sizex * sizey * sizeof(int) * 4, "accum1");
			int i, j;

			BLI_jitter_init(jit_ofs[0], scene->r.osa);

			/* first sample buffer, also initializes 'rv3d->persmat' */
			ED_view3d_draw_offscreen(scene, v3d, ar, sizex, sizey, NULL, winmat, draw_bgpic, draw_sky);
			GPU_offscreen_read_pixels(oglrender->ofs, GL_UNSIGNED_BYTE, rect);

			for (i = 0; i < sizex * sizey * 4; i++)
				accum_buffer[i] = rect[i];

			/* skip the first sample */
			for (j = 1; j < scene->r.osa; j++) {
				copy_m4_m4(winmat_jitter, winmat);
				window_translate_m4(winmat_jitter, rv3d->persmat,
				                    (jit_ofs[j][0] * 2.0f) / sizex,
				                    (jit_ofs[j][1] * 2.0f) / sizey);

				ED_view3d_draw_offscreen(scene, v3d, ar, sizex, sizey, NULL, winmat_jitter, draw_bgpic, draw_sky);
				GPU_offscreen_read_pixels(oglrender->ofs, GL_UNSIGNED_BYTE, rect);

				for (i = 0; i < sizex * sizey * 4; i++)
					accum_buffer[i] += rect[i];
			}

			for (i = 0; i < sizex * sizey * 4; i++)
				rect[i] = accum_buffer[i] / scene->r.osa;

			MEM_freeN(accum_buffer);
		}

		GPU_offscreen_unbind(oglrender->ofs); /* unbind */
	}
	else {
		/* shouldn't suddenly give errors mid-render, but it's possible */
		char err_out[256] = "unknown";
		ImBuf *ibuf_view = ED_view3d_draw_offscreen_imbuf_simple(scene, scene->camera, oglrender->sizex, oglrender->sizey,
		                                                         IB_rect, OB_SOLID, FALSE, TRUE,
		                                                         (draw_sky) ? R_ADDSKY: R_ALPHAPREMUL, err_out);
		camera = scene->camera;

		if (ibuf_view) {
			/* steal rect reference from ibuf */
			rect = (unsigned char *)ibuf_view->rect;
			ibuf_view->mall &= ~IB_rect;

			IMB_freeImBuf(ibuf_view);
		}
		else {
			fprintf(stderr, "%s: failed to get buffer, %s\n", __func__, err_out);
		}
	}

	/* note on color management:
	 *
	 * OpenGL renders into sRGB colors, but render buffers are expected to be
	 * linear, so we convert to linear here. That way the conversion back to bytes
	 * can make it sRGB (or another display space) again, and e.g. OpenEXR saving
	 * also saves the correct linear float buffer.
	 */

	if (rect) {
		int profile_to;
		
		if (BKE_scene_check_color_management_enabled(scene))
			profile_to = IB_PROFILE_LINEAR_RGB;
		else
			profile_to = IB_PROFILE_SRGB;

		/* the sequencer's trickier conversion already happened above;
		 * also assume OpenGL's space matches the byte buffer color space */
		IMB_buffer_float_from_byte(rr->rectf, rect,
		                           profile_to, IB_PROFILE_SRGB, true,
		                           oglrender->sizex, oglrender->sizey, oglrender->sizex, oglrender->sizex);
	}

	/* rr->rectf is now filled with image data */

	if ((scene->r.stamp & R_STAMP_ALL) && (scene->r.stamp & R_STAMP_DRAW))
		BKE_stamp_buf(scene, camera, rect, rr->rectf, rr->rectx, rr->recty, 4);

	RE_ReleaseResult(oglrender->re);

	/* update byte from float buffer */
	ibuf = BKE_image_acquire_ibuf(oglrender->ima, &oglrender->iuser, &lock);

	if (ibuf) {
		/* update display buffer */
		if (ibuf->rect == NULL)
			imb_addrectImBuf(ibuf);

		IMB_partial_display_buffer_update(ibuf, rr->rectf, rect, rr->rectx, 0, 0,
		                                  &scene->view_settings, &scene->display_settings,
		                                  0, 0, rr->rectx, rr->recty, true);

		/* write file for animation */
		if (oglrender->write_still) {
			char name[FILE_MAX];
			int ok;

			if (scene->r.im_format.planes == R_IMF_CHAN_DEPTH_8) {
				IMB_color_to_bw(ibuf);
			}

			BKE_makepicstring(name, scene->r.pic, oglrender->bmain->name, scene->r.cfra, &scene->r.im_format, scene->r.scemode & R_EXTENSION, FALSE);
			ok = BKE_imbuf_write_as(ibuf, name, &scene->r.im_format, TRUE); /* no need to stamp here */
			if (ok) printf("OpenGL Render written to '%s'\n", name);
			else printf("OpenGL Render failed to write '%s'\n", name);
		}
	}
	
	BKE_image_release_ibuf(oglrender->ima, ibuf, lock);

	if (rect)
		MEM_freeN(rect);
}
Example #11
static void screen_opengl_render_doit(OGLRender *oglrender, RenderResult *rr)
{
	Scene *scene = oglrender->scene;
	ARegion *ar = oglrender->ar;
	View3D *v3d = oglrender->v3d;
	RegionView3D *rv3d = oglrender->rv3d;
	Object *camera = NULL;
	ImBuf *ibuf;
	int sizex = oglrender->sizex;
	int sizey = oglrender->sizey;
	const short view_context = (v3d != NULL);
	bool draw_bgpic = true;
	bool draw_sky = (scene->r.alphamode == R_ADDSKY);
	unsigned char *rect = NULL;
	const char *viewname = RE_GetActiveRenderView(oglrender->re);

	if (oglrender->is_sequencer) {
		SeqRenderData context;
		SpaceSeq *sseq = oglrender->sseq;
		int chanshown = sseq ? sseq->chanshown : 0;
		struct bGPdata *gpd = (sseq && (sseq->flag & SEQ_SHOW_GPENCIL)) ? sseq->gpd : NULL;

		BKE_sequencer_new_render_data(
		        oglrender->bmain->eval_ctx, oglrender->bmain, scene,
		        oglrender->sizex, oglrender->sizey, 100.0f,
		        &context);

		context.view_id = BKE_scene_multiview_view_id_get(&scene->r, viewname);
		context.gpu_offscreen = oglrender->ofs;
		context.gpu_fx = oglrender->fx;
		context.gpu_full_samples = oglrender->ofs_full_samples;

		ibuf = BKE_sequencer_give_ibuf(&context, CFRA, chanshown);

		if (ibuf) {
			ImBuf *out = IMB_dupImBuf(ibuf);
			IMB_freeImBuf(ibuf);
			/* OpenGL render is considered to be a preview and should be
			 * as fast as possible, so currently we're making sure the sequencer
			 * result is always bytes to simplify the color management pipeline.
			 *
			 * TODO(sergey): In the case of output to a float container (EXR)
			 * it actually makes sense to keep the float buffer instead.
			 */
			if (out->rect_float != NULL) {
				IMB_rect_from_float(out);
				imb_freerectfloatImBuf(out);
			}
			BLI_assert((oglrender->sizex == ibuf->x) && (oglrender->sizey == ibuf->y));
			RE_render_result_rect_from_ibuf(rr, &scene->r, out, oglrender->view_id);
			IMB_freeImBuf(out);
		}

		if (gpd) {
			int i;
			unsigned char *gp_rect;
			unsigned char *render_rect = (unsigned char *)RE_RenderViewGetById(rr, oglrender->view_id)->rect32;

			GPU_offscreen_bind(oglrender->ofs, true);

			glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
			glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

			wmOrtho2(0, sizex, 0, sizey);
			glTranslatef(sizex / 2, sizey / 2, 0.0f);

			G.f |= G_RENDER_OGL;
			ED_gpencil_draw_ex(scene, gpd, sizex, sizey, scene->r.cfra, SPACE_SEQ);
			G.f &= ~G_RENDER_OGL;

			gp_rect = MEM_mallocN(sizex * sizey * sizeof(unsigned char) * 4, "offscreen rect");
			GPU_offscreen_read_pixels(oglrender->ofs, GL_UNSIGNED_BYTE, gp_rect);

			for (i = 0; i < sizex * sizey * 4; i += 4) {
				blend_color_mix_byte(&render_rect[i], &render_rect[i], &gp_rect[i]);
			}
			GPU_offscreen_unbind(oglrender->ofs, true);

			MEM_freeN(gp_rect);
		}
	}
	else {
		/* shouldn't suddenly give errors mid-render, but it's possible */
		char err_out[256] = "unknown";
		ImBuf *ibuf_view;
		const int alpha_mode = (draw_sky) ? R_ADDSKY : R_ALPHAPREMUL;

		if (view_context) {
			ibuf_view = ED_view3d_draw_offscreen_imbuf(
			       scene, v3d, ar, sizex, sizey,
			       IB_rect, draw_bgpic,
			       alpha_mode, oglrender->ofs_samples, oglrender->ofs_full_samples, viewname,
			       oglrender->fx, oglrender->ofs, err_out);

			/* for stamp only */
			if (rv3d->persp == RV3D_CAMOB && v3d->camera) {
				camera = BKE_camera_multiview_render(oglrender->scene, v3d->camera, viewname);
			}
		}
		else {
			ibuf_view = ED_view3d_draw_offscreen_imbuf_simple(
			        scene, scene->camera, oglrender->sizex, oglrender->sizey,
			        IB_rect, OB_SOLID, false, true, true,
			        alpha_mode, oglrender->ofs_samples, oglrender->ofs_full_samples, viewname,
			        oglrender->fx, oglrender->ofs, err_out);
			camera = scene->camera;
		}

		if (ibuf_view) {
			/* steal rect reference from ibuf */
			rect = (unsigned char *)ibuf_view->rect;
			ibuf_view->mall &= ~IB_rect;

			IMB_freeImBuf(ibuf_view);
		}
		else {
			fprintf(stderr, "%s: failed to get buffer, %s\n", __func__, err_out);
		}
	}

	/* note on color management:
	 *
	 * OpenGL renders into sRGB colors, but render buffers are expected to be
	 * linear, so we convert to linear here. That way the conversion back to bytes
	 * can make it sRGB (or another display space) again, and e.g. OpenEXR saving
	 * also saves the correct linear float buffer.
	 */

	if (rect) {
		int profile_to;
		float *rectf = RE_RenderViewGetById(rr, oglrender->view_id)->rectf;

		if (BKE_scene_check_color_management_enabled(scene))
			profile_to = IB_PROFILE_LINEAR_RGB;
		else
			profile_to = IB_PROFILE_SRGB;

		/* the sequencer's trickier conversion already happened above;
		 * also assume OpenGL's space matches the byte buffer color space */
		IMB_buffer_float_from_byte(rectf, rect,
		                           profile_to, IB_PROFILE_SRGB, true,
		                           oglrender->sizex, oglrender->sizey, oglrender->sizex, oglrender->sizex);

		/* rr->rectf is now filled with image data */

		if ((scene->r.stamp & R_STAMP_ALL) && (scene->r.stamp & R_STAMP_DRAW))
			BKE_image_stamp_buf(scene, camera, NULL, rect, rectf, rr->rectx, rr->recty, 4);

		MEM_freeN(rect);
	}
}
Example #12
static void screen_opengl_render_doit(OGLRender *oglrender, RenderResult *rr)
{
	Scene *scene = oglrender->scene;
	ARegion *ar = oglrender->ar;
	View3D *v3d = oglrender->v3d;
	RegionView3D *rv3d = oglrender->rv3d;
	Object *camera = NULL;
	int sizex = oglrender->sizex;
	int sizey = oglrender->sizey;
	const short view_context = (v3d != NULL);
	bool draw_bgpic = true;
	bool draw_sky = (scene->r.alphamode == R_ADDSKY);
	unsigned char *rect = NULL;
	const char *viewname = RE_GetActiveRenderView(oglrender->re);
	ImBuf *ibuf_result = NULL;

	if (oglrender->is_sequencer) {
		SpaceSeq *sseq = oglrender->sseq;
		struct bGPdata *gpd = (sseq && (sseq->flag & SEQ_SHOW_GPENCIL)) ? sseq->gpd : NULL;

		/* use pre-calculated ImBuf (avoids deadlock), see: */
		ImBuf *ibuf = oglrender->seq_data.ibufs_arr[oglrender->view_id];

		if (ibuf) {
			ImBuf *out = IMB_dupImBuf(ibuf);
			IMB_freeImBuf(ibuf);
			/* OpenGL render is considered to be a preview and should be
			 * as fast as possible, so currently we're making sure the sequencer
			 * result is always bytes to simplify the color management pipeline.
			 *
			 * TODO(sergey): In the case of output to a float container (EXR)
			 * it actually makes sense to keep the float buffer instead.
			 */
			if (out->rect_float != NULL) {
				IMB_rect_from_float(out);
				imb_freerectfloatImBuf(out);
			}
			BLI_assert((oglrender->sizex == ibuf->x) && (oglrender->sizey == ibuf->y));
			RE_render_result_rect_from_ibuf(rr, &scene->r, out, oglrender->view_id);
			IMB_freeImBuf(out);
		}
		else if (gpd) {
			/* If there are no strips, Grease Pencil still needs a buffer to draw on */
			ImBuf *out = IMB_allocImBuf(oglrender->sizex, oglrender->sizey, 32, IB_rect);
			RE_render_result_rect_from_ibuf(rr, &scene->r, out, oglrender->view_id);
			IMB_freeImBuf(out);
		}

		if (gpd) {
			int i;
			unsigned char *gp_rect;
			unsigned char *render_rect = (unsigned char *)RE_RenderViewGetById(rr, oglrender->view_id)->rect32;

			GPU_offscreen_bind(oglrender->ofs, true);

			glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
			glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

			wmOrtho2(0, sizex, 0, sizey);
			glTranslatef(sizex / 2, sizey / 2, 0.0f);

			G.f |= G_RENDER_OGL;
			ED_gpencil_draw_ex(scene, gpd, sizex, sizey, scene->r.cfra, SPACE_SEQ);
			G.f &= ~G_RENDER_OGL;

			gp_rect = MEM_mallocN(sizex * sizey * sizeof(unsigned char) * 4, "offscreen rect");
			GPU_offscreen_read_pixels(oglrender->ofs, GL_UNSIGNED_BYTE, gp_rect);

			for (i = 0; i < sizex * sizey * 4; i += 4) {
				blend_color_mix_byte(&render_rect[i], &render_rect[i], &gp_rect[i]);
			}
			GPU_offscreen_unbind(oglrender->ofs, true);

			MEM_freeN(gp_rect);
		}
	}
	else {
		/* shouldn't suddenly give errors mid-render, but it's possible */
		char err_out[256] = "unknown";
		ImBuf *ibuf_view;
		const int alpha_mode = (draw_sky) ? R_ADDSKY : R_ALPHAPREMUL;

		if (view_context) {
			ibuf_view = ED_view3d_draw_offscreen_imbuf(
			       scene, v3d, ar, sizex, sizey,
			       IB_rect, draw_bgpic,
			       alpha_mode, oglrender->ofs_samples, oglrender->ofs_full_samples, viewname,
			       oglrender->fx, oglrender->ofs, err_out);

			/* for stamp only */
			if (rv3d->persp == RV3D_CAMOB && v3d->camera) {
				camera = BKE_camera_multiview_render(oglrender->scene, v3d->camera, viewname);
			}
		}
		else {
			ibuf_view = ED_view3d_draw_offscreen_imbuf_simple(
			        scene, scene->camera, oglrender->sizex, oglrender->sizey,
			        IB_rect, OB_SOLID, false, true, true,
			        alpha_mode, oglrender->ofs_samples, oglrender->ofs_full_samples, viewname,
			        oglrender->fx, oglrender->ofs, err_out);
			camera = scene->camera;
		}

		if (ibuf_view) {
			ibuf_result = ibuf_view;
			rect = (unsigned char *)ibuf_view->rect;
		}
		else {
			fprintf(stderr, "%s: failed to get buffer, %s\n", __func__, err_out);
		}
	}

	if (ibuf_result != NULL) {
		if ((scene->r.stamp & R_STAMP_ALL) && (scene->r.stamp & R_STAMP_DRAW)) {
			BKE_image_stamp_buf(scene, camera, NULL, rect, NULL, rr->rectx, rr->recty, 4);
		}
		RE_render_result_rect_from_ibuf(rr, &scene->r, ibuf_result, oglrender->view_id);
		IMB_freeImBuf(ibuf_result);
	}
}