Example #1
static void load_frame_image_sequence(VoxelData *vd, Tex *tex)
{
	ImBuf *ibuf;
	Image *ima = tex->ima;
	ImageUser *tiuser = &tex->iuser;
	ImageUser iuser = *(tiuser);
	int x = 0, y = 0, z = 0;
	const float *rf;

	if (!ima) return;
	if (iuser.frames == 0) return;
	
	ima->source = IMA_SRC_SEQUENCE;
	iuser.framenr = 1 + iuser.offset;

	/* find the first valid ibuf and use it to initialize the resolution of the data set */
	/* need to do this in advance so we know how much memory to allocate */
	ibuf = BKE_image_acquire_ibuf(ima, &iuser, NULL);
	while (!ibuf && (iuser.framenr < iuser.frames)) {
		iuser.framenr++;
		ibuf = BKE_image_acquire_ibuf(ima, &iuser, NULL);
	}
	if (!ibuf) return;
	if (!ibuf->rect_float) IMB_float_from_rect(ibuf);
	
	vd->flag |= TEX_VD_STILL;
	vd->resol[0] = ibuf->x;
	vd->resol[1] = ibuf->y;
	vd->resol[2] = iuser.frames;
	vd->dataset = MEM_mapallocN(sizeof(float) * vd_resol_size(vd), "voxel dataset");
	
	for (z = 0; z < iuser.frames; z++) {
		/* get a new ibuf for each frame */
		if (z > 0) {
			iuser.framenr++;
			BKE_image_release_ibuf(ima, ibuf, NULL);
			ibuf = BKE_image_acquire_ibuf(ima, &iuser, NULL);
			if (!ibuf) break;
			if (!ibuf->rect_float) IMB_float_from_rect(ibuf);
		}
		rf = ibuf->rect_float;
		
		for (y = 0; y < ibuf->y; y++) {
			for (x = 0; x < ibuf->x; x++) {
				/* currently averaged to monochrome */
				vd->dataset[BLI_VOXEL_INDEX(x, y, z, vd->resol)] = (rf[0] + rf[1] + rf[2]) / 3.0f;
				rf += 4;
			}
		}
		
		BKE_image_free_anim_ibufs(ima, iuser.framenr);
	}

	BKE_image_release_ibuf(ima, ibuf, NULL);

	vd->ok = 1;
	return;
}
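For reference, the voxel addressing used above is a plain x-major linear index into a resol[0] x resol[1] x resol[2] grid, with one image per z slice. A minimal sketch of what BLI_VOXEL_INDEX and vd_resol_size amount to (the helper names below are illustrative, not the actual Blender macros):

/* sketch only: approximate equivalents of the indexing helpers used above */
static size_t voxel_index(int x, int y, int z, const int resol[3])
{
	/* x varies fastest, then y, then z */
	return (size_t)x + (size_t)y * (size_t)resol[0] + (size_t)z * (size_t)resol[0] * (size_t)resol[1];
}

static size_t voxel_count(const int resol[3])
{
	return (size_t)resol[0] * (size_t)resol[1] * (size_t)resol[2];
}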
Example #2
static void envmap_split_ima(EnvMap *env, ImBuf *ibuf)
{
	int dx, part;
	
	/* after acquiring the lock we test cube[1]; if it is set, the other thread has already done the work */
	BLI_lock_thread(LOCK_IMAGE);
	if (env->cube[1] == NULL) {

		BKE_texture_envmap_free_data(env);
	
		dx = ibuf->y;
		dx /= 2;
		if (3 * dx == ibuf->x) {
			env->type = ENV_CUBE;
			env->ok = ENV_OSA;
		}
		else if (ibuf->x == ibuf->y) {
			env->type = ENV_PLANE;
			env->ok = ENV_OSA;
		}
		else {
			printf("Incorrect envmap size\n");
			env->ok = 0;
			env->ima->ok = 0;
		}
		
		if (env->ok) {
			if (env->type == ENV_CUBE) {
				for (part = 0; part < 6; part++) {
					env->cube[part] = IMB_allocImBuf(dx, dx, 24, IB_rect | IB_rectfloat);
				}
				IMB_float_from_rect(ibuf);
				
				IMB_rectcpy(env->cube[0], ibuf, 
				            0, 0, 0, 0, dx, dx);
				IMB_rectcpy(env->cube[1], ibuf, 
				            0, 0, dx, 0, dx, dx);
				IMB_rectcpy(env->cube[2], ibuf, 
				            0, 0, 2 * dx, 0, dx, dx);
				IMB_rectcpy(env->cube[3], ibuf, 
				            0, 0, 0, dx, dx, dx);
				IMB_rectcpy(env->cube[4], ibuf, 
				            0, 0, dx, dx, dx, dx);
				IMB_rectcpy(env->cube[5], ibuf, 
				            0, 0, 2 * dx, dx, dx, dx);
				
			}
			else { /* ENV_PLANE */
				env->cube[1] = IMB_dupImBuf(ibuf);
				IMB_float_from_rect(env->cube[1]);
			}
		}
	}
	BLI_unlock_thread(LOCK_IMAGE);
}
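The six IMB_rectcpy() calls above pull the cube faces out of a 3x2 grid of dx-by-dx tiles in the source image (which is why the size check requires 3 * dx == ibuf->x with dx == ibuf->y / 2). The same copies could be written as a loop; a short sketch using the offsets from the code above:

/* sketch: equivalent loop over the 3x2 face layout assumed above */
for (part = 0; part < 6; part++) {
	int sx = (part % 3) * dx;  /* source column: 0, dx or 2*dx */
	int sy = (part / 3) * dx;  /* source row: 0 or dx */
	IMB_rectcpy(env->cube[part], ibuf, 0, 0, sx, sy, dx, dx);
}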
Example #3
static CompBuf *node_composit_get_movieclip(RenderData *rd, MovieClip *clip, MovieClipUser *user)
{
	ImBuf *orig_ibuf, *ibuf;
	CompBuf *stackbuf;
	int type;

	float *rect;
	int alloc = FALSE;

	orig_ibuf = BKE_movieclip_get_ibuf(clip, user);

	if (orig_ibuf == NULL || (orig_ibuf->rect == NULL && orig_ibuf->rect_float == NULL)) {
		IMB_freeImBuf(orig_ibuf);
		return NULL;
	}

	ibuf = IMB_dupImBuf(orig_ibuf);
	IMB_freeImBuf(orig_ibuf);

	if (ibuf->rect_float == NULL || (ibuf->userflags & IB_RECT_INVALID)) {
		IMB_float_from_rect(ibuf);
		ibuf->userflags &= ~IB_RECT_INVALID;
	}

	/* now we need a float buffer from the image with matching color management */
	if (ibuf->channels == 4) {
		rect = node_composit_get_float_buffer(rd, ibuf, &alloc);
	}
	else {
		/* non-rgba passes can't use color profiles */
		rect = ibuf->rect_float;
	}
	/* done coercing into the correct color management */

	if (!alloc) {
		rect = MEM_dupallocN(rect);
		alloc = TRUE;
	}

	type = ibuf->channels;

	if (rd->scemode & R_COMP_CROP) {
		stackbuf = get_cropped_compbuf(&rd->disprect, rect, ibuf->x, ibuf->y, type);
		if (alloc)
			MEM_freeN(rect);
	}
	else {
		/* we put imbuf copy on stack, cbuf knows rect is from other ibuf when freed! */
		stackbuf = alloc_compbuf(ibuf->x, ibuf->y, type, FALSE);
		stackbuf->rect = rect;
		stackbuf->malloc = alloc;
	}

	IMB_freeImBuf(ibuf);

	return stackbuf;
}
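The alloc flag above tracks ownership of rect: it is set when node_composit_get_float_buffer() (or the MEM_dupallocN fallback) allocated a buffer that is not owned by the ImBuf, and the crop branch frees that buffer once the cropped compbuf has been built. A hedged sketch of the convention this code assumes (hypothetical caller, same names as above):

/* sketch: ownership convention for the *alloc out-parameter */
int alloc = FALSE;
float *rect = node_composit_get_float_buffer(rd, ibuf, &alloc);
/* ... read from rect ... */
if (alloc) {
	MEM_freeN(rect);  /* only the caller's to free when a separate buffer was allocated */
}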
Example #4
/* note: this function is used for multilayer too, to ensure uniform 
   handling with BKE_image_get_ibuf() */
static CompBuf *node_composit_get_image(RenderData *rd, Image *ima, ImageUser *iuser)
{
	ImBuf *ibuf;
	CompBuf *stackbuf;
	int type;

	float *rect;
	int alloc= FALSE;

	ibuf= BKE_image_get_ibuf(ima, iuser);
	if(ibuf==NULL || (ibuf->rect==NULL && ibuf->rect_float==NULL)) {
		return NULL;
	}

	if (ibuf->rect_float == NULL) {
		IMB_float_from_rect(ibuf);
	}

	/* now we need a float buffer from the image with matching color management */
	/* XXX weak code, multilayer is excluded from this */
	rect= ibuf->rect_float;
	
	/* done coercing into the correct color management */


	type= ibuf->channels;
	
	if(rd->scemode & R_COMP_CROP) {
		stackbuf= get_cropped_compbuf(&rd->disprect, rect, ibuf->x, ibuf->y, type);
		if(alloc)
			MEM_freeN(rect);
	}
	else {
		/* we put imbuf copy on stack, cbuf knows rect is from other ibuf when freed! */
		stackbuf= alloc_compbuf(ibuf->x, ibuf->y, type, FALSE);
		stackbuf->rect= rect;
		stackbuf->malloc= alloc;
	}
	
	/*code to respect the premul flag of images; I'm
	  not sure if this is a good idea for multilayer images,
	  since it never worked before for them.
	if (type==CB_RGBA && ima->flag & IMA_DO_PREMUL) {
		//premul the image
		int i;
		float *pixel = stackbuf->rect;
		
		for (i=0; i<stackbuf->x*stackbuf->y; i++, pixel += 4) {
			pixel[0] *= pixel[3];
			pixel[1] *= pixel[3];
			pixel[2] *= pixel[3];
		}
	}
	*/
	return stackbuf;
}
Example #5
/* only used for image editor curves */
void curvemapping_do_ibuf(CurveMapping *cumap, ImBuf *ibuf)
{
	ImBuf *tmpbuf;
	int pixel;
	float *pix_in;
	float col[3];
	int stride = 4;
	float *pix_out;
	
	if (ibuf == NULL)
		return;
	if (ibuf->rect_float == NULL)
		IMB_float_from_rect(ibuf);
	else if (ibuf->rect == NULL)
		imb_addrectImBuf(ibuf);
	
	if (!ibuf->rect || !ibuf->rect_float)
		return;
	
	/* work on a temp buffer, so we can color manage afterwards.
	 * No worse off memory-wise than comp nodes */
	tmpbuf = IMB_dupImBuf(ibuf);
	
	curvemapping_premultiply(cumap, 0);
	
	pix_in = ibuf->rect_float;
	pix_out = tmpbuf->rect_float;

	if (ibuf->channels)
		stride = ibuf->channels;
	
	for (pixel = ibuf->x * ibuf->y; pixel > 0; pixel--, pix_in += stride, pix_out += stride) {
		if (stride < 3) {
			col[0] = curvemap_evaluateF(cumap->cm, *pix_in);
			
			pix_out[1] = pix_out[2] = pix_out[3] = pix_out[0] = col[0];
		}
		else {
			curvemapping_evaluate_premulRGBF(cumap, col, pix_in);
			pix_out[0] = col[0];
			pix_out[1] = col[1];
			pix_out[2] = col[2];
			if (stride > 3)
				pix_out[3] = pix_in[3];
			else
				pix_out[3] = 1.f;
		}
	}
	
	IMB_rect_from_float(tmpbuf);
	SWAP(unsigned int *, tmpbuf->rect, ibuf->rect);
	IMB_freeImBuf(tmpbuf);
	
	curvemapping_premultiply(cumap, 1);
}
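The SWAP() call above exchanges the byte-rect pointers so the color-managed result computed in tmpbuf ends up owned by ibuf, while the old byte rect is freed along with tmpbuf. It is just a pointer swap, roughly:

/* sketch: what SWAP(unsigned int *, tmpbuf->rect, ibuf->rect) amounts to */
unsigned int *tmp_rect = tmpbuf->rect;
tmpbuf->rect = ibuf->rect;
ibuf->rect = tmp_rect;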
Example #6
static int paint_2d_canvas_set(ImagePaintState *s, Image *ima)
{
    ImBuf *ibuf = BKE_image_acquire_ibuf(ima, s->sima ? &s->sima->iuser : NULL, NULL);

    /* verify that we can paint and set canvas */
    if (ima == NULL) {
        return 0;
    }
    else if (ima->packedfile && ima->rr) {
        s->warnpackedfile = ima->id.name + 2;
        return 0;
    }
    else if (ibuf && ibuf->channels != 4) {
        s->warnmultifile = ima->id.name + 2;
        return 0;
    }
    else if (!ibuf || !(ibuf->rect || ibuf->rect_float))
        return 0;

    s->image = ima;
    s->canvas = ibuf;

    /* set clone canvas */
    if (s->tool == PAINT_TOOL_CLONE) {
        ima = s->brush->clone.image;
        ibuf = BKE_image_acquire_ibuf(ima, s->sima ? &s->sima->iuser : NULL, NULL);

        if (!ima || !ibuf || !(ibuf->rect || ibuf->rect_float)) {
            BKE_image_release_ibuf(ima, ibuf, NULL);
            BKE_image_release_ibuf(s->image, s->canvas, NULL);
            return 0;
        }

        s->clonecanvas = ibuf;

        /* temporarily add float rect for cloning */
        if (s->canvas->rect_float && !s->clonecanvas->rect_float) {
            IMB_float_from_rect(s->clonecanvas);
        }
        else if (!s->canvas->rect_float && !s->clonecanvas->rect)
            IMB_rect_from_float(s->clonecanvas);
    }

    /* set masking */
    s->do_masking = (s->brush->flag & BRUSH_AIRBRUSH ||
                     (s->brush->imagepaint_tool == PAINT_TOOL_SMEAR) ||
                     (s->brush->mtex.tex && !ELEM3(s->brush->mtex.brush_map_mode, MTEX_MAP_MODE_TILED, MTEX_MAP_MODE_STENCIL, MTEX_MAP_MODE_3D)))
                    ? false : true;

    return 1;
}
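The ELEM3() call in the masking expression above is a three-way membership test; a sketch of its behaviour (the real macro lives in Blender's BLI headers):

/* sketch: behaviour of the ELEM3() test used above */
#define ELEM3_SKETCH(v, a, b, c)  ((v) == (a) || (v) == (b) || (v) == (c))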
Example #7
static int paint_2d_canvas_set(ImagePaintState *s, Image *ima)
{
	ImBuf *ibuf = BKE_image_acquire_ibuf(ima, s->sima ? &s->sima->iuser : NULL, NULL);

	/* verify that we can paint and set canvas */
	if (ima == NULL) {
		return 0;
	}
	else if (BKE_image_has_packedfile(ima) && ima->rr) {
		s->warnpackedfile = ima->id.name + 2;
		return 0;
	}
	else if (ibuf && ibuf->channels != 4) {
		s->warnmultifile = ima->id.name + 2;
		return 0;
	}
	else if (!ibuf || !(ibuf->rect || ibuf->rect_float))
		return 0;

	s->image = ima;
	s->canvas = ibuf;

	/* set clone canvas */
	if (s->tool == PAINT_TOOL_CLONE) {
		ima = s->brush->clone.image;
		ibuf = BKE_image_acquire_ibuf(ima, s->sima ? &s->sima->iuser : NULL, NULL);

		if (!ima || !ibuf || !(ibuf->rect || ibuf->rect_float)) {
			BKE_image_release_ibuf(ima, ibuf, NULL);
			BKE_image_release_ibuf(s->image, s->canvas, NULL);
			return 0;
		}

		s->clonecanvas = ibuf;

		/* temporarily add float rect for cloning */
		if (s->canvas->rect_float && !s->clonecanvas->rect_float) {
			IMB_float_from_rect(s->clonecanvas);
		}
		else if (!s->canvas->rect_float && !s->clonecanvas->rect)
			IMB_rect_from_float(s->clonecanvas);
	}

	/* set masking */
	s->do_masking = paint_use_opacity_masking(s->brush);
	
	return 1;
}
Example #8
ImBuf *render_result_rect_to_ibuf(RenderResult *rr, RenderData *rd, const int view_id)
{
	ImBuf *ibuf = IMB_allocImBuf(rr->rectx, rr->recty, rd->im_format.planes, 0);
	RenderView *rv = RE_RenderViewGetById(rr, view_id);

	/* if it doesn't exist, BKE_imbuf_write makes one */
	ibuf->rect = (unsigned int *) rv->rect32;
	ibuf->rect_float = rv->rectf;
	ibuf->zbuf_float = rv->rectz;

	/* float factor for random dither, imbuf takes care of it */
	ibuf->dither = rd->dither_intensity;
	
	/* prepare to gamma correct to sRGB color space
	 * note that sequence editor can generate 8bpc render buffers
	 */
	if (ibuf->rect) {
		if (BKE_imtype_valid_depths(rd->im_format.imtype) & (R_IMF_CHAN_DEPTH_12 | R_IMF_CHAN_DEPTH_16 | R_IMF_CHAN_DEPTH_24 | R_IMF_CHAN_DEPTH_32)) {
			if (rd->im_format.depth == R_IMF_CHAN_DEPTH_8) {
				/* Higher depth bits are supported but not needed for current file output. */
				ibuf->rect_float = NULL;
			}
			else {
				IMB_float_from_rect(ibuf);
			}
		}
		else {
			/* ensure no float buffer remained from previous frame */
			ibuf->rect_float = NULL;
		}
	}

	/* color -> grayscale */
	/* editing directly would alter the render view */
	if (rd->im_format.planes == R_IMF_PLANES_BW) {
		ImBuf *ibuf_bw = IMB_dupImBuf(ibuf);
		IMB_color_to_bw(ibuf_bw);
		IMB_freeImBuf(ibuf);
		ibuf = ibuf_bw;
	}

	return ibuf;
}
Example #9
/* use when you need to get a buffer with a certain profile;
 * if the returned buffer had to be newly allocated, *alloc is set and the caller must free it */
float *IMB_float_profile_ensure(struct ImBuf *ibuf, int profile, int *alloc)
{
	/* stupid but it works like this everywhere now */
	const short is_lin_from= (ibuf->profile != IB_PROFILE_NONE);
	const short is_lin_to= (profile != IB_PROFILE_NONE);

	
	if(is_lin_from == is_lin_to) {
		*alloc= 0;

		/* simple case, just allocate the buffer and return */
		if(ibuf->rect_float == NULL) {
			IMB_float_from_rect(ibuf);
		}

		return ibuf->rect_float;
	}
	else {
		/* conversion is needed; first check whether a float buffer already exists */
		float *fbuf= MEM_mallocN(ibuf->x * ibuf->y * sizeof(float) * 4, "IMB_float_profile_ensure");
		*alloc= 1;

		if(ibuf->rect_float == NULL) {
			if(is_lin_to) {
				imb_float_from_rect_linear(ibuf, fbuf);
			}
			else {
				imb_float_from_rect_nonlinear(ibuf, fbuf);
			}
		}
		else {
			if(is_lin_to) { /* lin -> nonlin */
				linearrgb_to_srgb_rgba_rgba_buf(fbuf, ibuf->rect_float, ibuf->x * ibuf->y);
			}
			else { /* nonlin -> lin */
				srgb_to_linearrgb_rgba_rgba_buf(fbuf, ibuf->rect_float, ibuf->x * ibuf->y);
			}
		}

		return fbuf;
	}
}
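The *alloc out-parameter above tells the caller whether it received the ImBuf's own rect_float (nothing extra to free) or a freshly converted buffer that it must release itself. A hedged usage sketch (hypothetical caller):

/* sketch: typical caller pattern for IMB_float_profile_ensure() */
int alloc;
float *fbuf = IMB_float_profile_ensure(ibuf, IB_PROFILE_LINEAR_RGB, &alloc);
/* ... read the linear pixels in fbuf ... */
if (alloc) {
	MEM_freeN(fbuf);  /* a converted copy was allocated just for this caller */
}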
Example #10
static void colorfn(float *out, TexParams *p, bNode *node, bNodeStack **UNUSED(in), short UNUSED(thread))
{
	float x = p->co[0];
	float y = p->co[1];
	Image *ima= (Image *)node->id;
	ImageUser *iuser= (ImageUser *)node->storage;
	
	if( ima ) {
		ImBuf *ibuf = BKE_image_get_ibuf(ima, iuser);
		if( ibuf ) {
			float xsize, ysize;
			float xoff, yoff;
			int px, py;
			
			float *result;

			xsize = ibuf->x / 2;
			ysize = ibuf->y / 2;
			xoff = yoff = -1;
					
			px = (int)( (x-xoff) * xsize );
			py = (int)( (y-yoff) * ysize );
		
			if( (!xsize) || (!ysize) ) return;
			
			if( !ibuf->rect_float ) {
				BLI_lock_thread(LOCK_IMAGE);
				if( !ibuf->rect_float )
					IMB_float_from_rect(ibuf);
				BLI_unlock_thread(LOCK_IMAGE);
			}
			
			while( px < 0 ) px += ibuf->x;
			while( py < 0 ) py += ibuf->y;
			while( px >= ibuf->x ) px -= ibuf->x;
			while( py >= ibuf->y ) py -= ibuf->y;
			
			result = ibuf->rect_float + py*ibuf->x*4 + px*4;
			QUATCOPY( out, result );
		}
	}
}
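QUATCOPY() above copies the four float components of the sampled pixel into the output; it amounts to:

/* sketch: what QUATCOPY(out, result) does here */
memcpy(out, result, sizeof(float) * 4);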
Example #11
void MovieClipBaseOperation::initExecution()
{
	if (this->m_movieClip) {
		BKE_movieclip_user_set_frame(this->m_movieClipUser, this->m_framenumber);
		ImBuf *ibuf;

		if (this->m_cacheFrame)
			ibuf = BKE_movieclip_get_ibuf(this->m_movieClip, this->m_movieClipUser);
		else
			ibuf = BKE_movieclip_get_ibuf_flag(this->m_movieClip, this->m_movieClipUser, this->m_movieClip->flag, MOVIECLIP_CACHE_SKIP);

		if (ibuf) {
			this->m_movieClipBuffer = ibuf;
			if (ibuf->rect_float == NULL || ibuf->userflags & IB_RECT_INVALID) {
				IMB_float_from_rect(ibuf);
				ibuf->userflags &= ~IB_RECT_INVALID;
			}
		}
	}
}
Example #12
static ImBuf *accessor_get_ibuf(TrackingImageAccessor *accessor,
                                int clip_index,
                                int frame,
                                libmv_InputMode input_mode,
                                int downscale,
                                const libmv_Region *region,
                                const libmv_FrameTransform *transform)
{
	ImBuf *ibuf, *orig_ibuf, *final_ibuf;
	int64_t transform_key = 0;

	if (transform != NULL) {
		transform_key = libmv_frameAccessorgetTransformKey(transform);
	}

	/* First try to get fully processed image from the cache. */
	ibuf = accesscache_get(accessor,
	                       clip_index,
	                       frame,
	                       input_mode,
	                       downscale,
	                       transform_key);
	if (ibuf != NULL) {
		return ibuf;
	}

	/* And now we do postprocessing of the original frame. */
	orig_ibuf = accessor_get_preprocessed_ibuf(accessor, clip_index, frame);

	if (orig_ibuf == NULL) {
		return NULL;
	}

	if (region != NULL) {
		int width = region->max[0] - region->min[0],
		    height = region->max[1] - region->min[1];

		/* If the requested region goes outside of the actual frame we still
		 * return the requested region size, but only partially fill it with
		 * the data we have.
		 */
		int clamped_origin_x = max_ii((int)region->min[0], 0),
		    clamped_origin_y = max_ii((int)region->min[1], 0);
		int dst_offset_x = clamped_origin_x - (int)region->min[0],
		    dst_offset_y = clamped_origin_y - (int)region->min[1];
		int clamped_width = width - dst_offset_x,
		    clamped_height = height - dst_offset_y;
		clamped_width = min_ii(clamped_width, orig_ibuf->x - clamped_origin_x);
		clamped_height = min_ii(clamped_height, orig_ibuf->y - clamped_origin_y);

		final_ibuf = IMB_allocImBuf(width, height, 32, IB_rectfloat);

		if (orig_ibuf->rect_float != NULL) {
			IMB_rectcpy(final_ibuf, orig_ibuf,
			            dst_offset_x, dst_offset_y,
			            clamped_origin_x, clamped_origin_y,
			            clamped_width, clamped_height);
		}
		else {
			int y;
			/* TODO(sergey): We don't do any color space or alpha conversion
			 * here. Probably Libmv is better to work in the linear space,
			 * but keep sRGB space here for compatibility for now.
			 */
			for (y = 0; y < clamped_height; ++y) {
				int x;
				for (x = 0; x < clamped_width; ++x) {
					int src_x = x + clamped_origin_x,
					    src_y = y + clamped_origin_y;
					int dst_x = x + dst_offset_x,
					    dst_y = y + dst_offset_y;
					int dst_index = (dst_y * width + dst_x) * 4,
					    src_index = (src_y * orig_ibuf->x + src_x) * 4;
					rgba_uchar_to_float(final_ibuf->rect_float + dst_index,
					                    (unsigned char *)orig_ibuf->rect +
					                                     src_index);
				}
			}
		}
	}
	else {
		/* Libmv only works with float images.
		 *
		 * This would likely make it so loads of float buffers are being stored
		 * in the cache which is nice on the one hand (faster re-use of the
		 * frames) but on the other hand it bumps the memory usage up.
		 */
		BLI_lock_thread(LOCK_MOVIECLIP);
		IMB_float_from_rect(orig_ibuf);
		BLI_unlock_thread(LOCK_MOVIECLIP);
		final_ibuf = orig_ibuf;
	}

	if (downscale > 0) {
		if (final_ibuf == orig_ibuf) {
			final_ibuf = IMB_dupImBuf(orig_ibuf);
		}
		/* note: 'ibuf' is NULL here (it was the cache lookup), so scale relative to the buffer we have */
		IMB_scaleImBuf(final_ibuf,
		               final_ibuf->x / (1 << downscale),
		               final_ibuf->y / (1 << downscale));
	}

	if (transform != NULL) {
		libmv_FloatImage input_image, output_image;
		ibuf_to_float_image(final_ibuf, &input_image);
		libmv_frameAccessorgetTransformRun(transform,
		                                   &input_image,
		                                   &output_image);
		if (final_ibuf != orig_ibuf) {
			IMB_freeImBuf(final_ibuf);
		}
		final_ibuf = float_image_to_ibuf(&output_image);
		libmv_floatImageDestroy(&output_image);
	}

	if (input_mode == LIBMV_IMAGE_MODE_RGBA) {
		BLI_assert(final_ibuf->channels == 3 || final_ibuf->channels == 4);
		/* pass */
	}
	else /* if (input_mode == LIBMV_IMAGE_MODE_MONO) */ {
		if (final_ibuf->channels != 1) {
			ImBuf *grayscale_ibuf = make_grayscale_ibuf_copy(final_ibuf);
			if (final_ibuf != orig_ibuf) {
				/* We dereference original frame later. */
				IMB_freeImBuf(final_ibuf);
			}
			final_ibuf = grayscale_ibuf;
		}
	}

	/* it's possible processing still didn't happen at this point,
	 * but we really need a copy of the buffer to be transformed
	 * and to be put to the cache.
	 */
	if (final_ibuf == orig_ibuf) {
		final_ibuf = IMB_dupImBuf(orig_ibuf);
	}

	IMB_freeImBuf(orig_ibuf);

	/* We put postprocessed frame to the cache always for now,
	 * not the smartest thing in the world, but who cares at this point.
	 */

	/* TODO(sergey): Disable cache for now, because we don't store region
	 * in the cache key and can't check whether cached version is usable for
	 * us or not.
	 *
	 * Need to think better about what to cache and when.
	 */
	if (false) {
		accesscache_put(accessor,
		                clip_index,
		                frame,
		                input_mode,
		                downscale,
		                transform_key,
		                final_ibuf);
	}

	return final_ibuf;
}
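As a concrete check of the region clamping above (numbers invented for illustration): for a 100 x 100 source frame and a requested region with min = (-10, -10) and max = (40, 40), width and height stay 50, clamped_origin becomes (0, 0), dst_offset becomes (10, 10), and clamped_width/clamped_height come out as 40, so a 40 x 40 block is copied from the frame origin into the 50 x 50 output starting at (10, 10); the out-of-frame border of the output is simply not written by the copy.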
Example #13
/* note: this function is used for multilayer too, to ensure uniform 
   handling with BKE_image_get_ibuf() */
static CompBuf *node_composit_get_image(RenderData *rd, Image *ima, ImageUser *iuser)
{
	ImBuf *ibuf;
	CompBuf *stackbuf;
	int type;
	
	ibuf= BKE_image_get_ibuf(ima, iuser);
	if(ibuf==NULL)
		return NULL;
	
	if (rd->color_mgt_flag & R_COLOR_MANAGEMENT) {
		if (ibuf->profile == IB_PROFILE_NONE) {
			/* if a float buffer already exists, it is assumed to be linear already */
			/* otherwise create one from the byte rect */
			if (ibuf->rect_float == NULL) {
				imb_freerectfloatImBuf(ibuf);
				ibuf->profile = IB_PROFILE_SRGB;
				IMB_float_from_rect(ibuf);
			} else {
				ibuf->profile = IB_PROFILE_LINEAR_RGB;
			}
		}
	} else {
		if (ibuf->profile == IB_PROFILE_SRGB) {
			if (ibuf->rect_float != NULL) {
				imb_freerectfloatImBuf(ibuf);
			}
			ibuf->profile = IB_PROFILE_NONE;
			IMB_float_from_rect(ibuf);
		}
	}
	
	if (ibuf->rect_float == NULL) {
		IMB_float_from_rect(ibuf);
	}

	type= ibuf->channels;
	
	if(rd->scemode & R_COMP_CROP) {
		stackbuf= get_cropped_compbuf(&rd->disprect, ibuf->rect_float, ibuf->x, ibuf->y, type);
	}
	else {
		/* we put imbuf copy on stack, cbuf knows rect is from other ibuf when freed! */
		stackbuf= alloc_compbuf(ibuf->x, ibuf->y, type, 0);
		stackbuf->rect= ibuf->rect_float;
	}
	
	/*code to respect the premul flag of images; I'm
	  not sure if this is a good idea for multilayer images,
	  since it never worked before for them.
	if (type==CB_RGBA && ima->flag & IMA_DO_PREMUL) {
		//premul the image
		int i;
		float *pixel = stackbuf->rect;
		
		for (i=0; i<stackbuf->x*stackbuf->y; i++, pixel += 4) {
			pixel[0] *= pixel[3];
			pixel[1] *= pixel[3];
			pixel[2] *= pixel[3];
		}
	}
	*/
	return stackbuf;
}
Example #14
static void screen_opengl_render_doit(OGLRender *oglrender, RenderResult *rr)
{
	Scene *scene = oglrender->scene;
	ARegion *ar = oglrender->ar;
	View3D *v3d = oglrender->v3d;
	RegionView3D *rv3d = oglrender->rv3d;
	Object *camera = NULL;
	ImBuf *ibuf;
	float winmat[4][4];
	int sizex = oglrender->sizex;
	int sizey = oglrender->sizey;
	const short view_context = (v3d != NULL);
	bool draw_bgpic = true;
	bool draw_sky = (scene->r.alphamode == R_ADDSKY);
	unsigned char *rect = NULL;
	const char *viewname = RE_GetActiveRenderView(oglrender->re);

	if (oglrender->is_sequencer) {
		SeqRenderData context;
		SpaceSeq *sseq = oglrender->sseq;
		int chanshown = sseq ? sseq->chanshown : 0;
		struct bGPdata *gpd = (sseq && (sseq->flag & SEQ_SHOW_GPENCIL)) ? sseq->gpd : NULL;

		BKE_sequencer_new_render_data(
		        oglrender->bmain->eval_ctx, oglrender->bmain, scene,
		        oglrender->sizex, oglrender->sizey, 100.0f,
		        &context);

		context.view_id = BKE_scene_multiview_view_id_get(&scene->r, viewname);
		ibuf = BKE_sequencer_give_ibuf(&context, CFRA, chanshown);

		if (ibuf) {
			float *rectf;
			ImBuf *linear_ibuf;

			BLI_assert((oglrender->sizex == ibuf->x) && (oglrender->sizey == ibuf->y));

			linear_ibuf = IMB_dupImBuf(ibuf);
			IMB_freeImBuf(ibuf);

			if (linear_ibuf->rect_float == NULL) {
				/* internally the sequencer works in display space and stores both byte and float buffers in that space.
				 * It is possible that the byte->float conversion didn't happen in the sequencer (e.g. when adding an image
				 * sequence/movie into the sequencer), so there'll be only a byte buffer. Create a float buffer from the
				 * existing byte buffer, making it linear.
				 */

				IMB_float_from_rect(linear_ibuf);
			}
			else {
				/* ensure float buffer is in linear space, not in display space */
				BKE_sequencer_imbuf_from_sequencer_space(scene, linear_ibuf);
			}

			rectf = RE_RenderViewGetRectf(rr, oglrender->view_id);
			memcpy(rectf, linear_ibuf->rect_float, sizeof(float) * 4 * oglrender->sizex * oglrender->sizey);

			IMB_freeImBuf(linear_ibuf);
		}

		if (gpd) {
			int i;
			unsigned char *gp_rect;

			GPU_offscreen_bind(oglrender->ofs, true);

			glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
			glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

			wmOrtho2(0, sizex, 0, sizey);
			glTranslatef(sizex / 2, sizey / 2, 0.0f);

			G.f |= G_RENDER_OGL;
			ED_gpencil_draw_ex(scene, gpd, sizex, sizey, scene->r.cfra, SPACE_SEQ);
			G.f &= ~G_RENDER_OGL;

			gp_rect = MEM_mallocN(sizex * sizey * sizeof(unsigned char) * 4, "offscreen rect");
			GPU_offscreen_read_pixels(oglrender->ofs, GL_UNSIGNED_BYTE, gp_rect);

			for (i = 0; i < sizex * sizey * 4; i += 4) {
				float  col_src[4];
				rgba_uchar_to_float(col_src, &gp_rect[i]);
				blend_color_mix_float(&rr->rectf[i], &rr->rectf[i], col_src);
			}
			GPU_offscreen_unbind(oglrender->ofs, true);

			MEM_freeN(gp_rect);
		}
	}
	else if (view_context) {
		bool is_persp;
		/* full copy */
		GPUFXSettings fx_settings = v3d->fx_settings;

		ED_view3d_draw_offscreen_init(scene, v3d);

		GPU_offscreen_bind(oglrender->ofs, true); /* bind */

		/* render 3d view */
		if (rv3d->persp == RV3D_CAMOB && v3d->camera) {
			/*int is_ortho = scene->r.mode & R_ORTHO;*/
			camera = BKE_camera_multiview_render(oglrender->scene, v3d->camera, viewname);
			RE_GetCameraWindow(oglrender->re, camera, scene->r.cfra, winmat);
			if (camera->type == OB_CAMERA) {
				Camera *cam = camera->data;
				is_persp = cam->type == CAM_PERSP;
			}
			else
				is_persp = true;
			BKE_camera_to_gpu_dof(camera, &fx_settings);
		}
		else {
			rctf viewplane;
			float clipsta, clipend;

			bool is_ortho = ED_view3d_viewplane_get(v3d, rv3d, sizex, sizey, &viewplane, &clipsta, &clipend, NULL);
			if (is_ortho) orthographic_m4(winmat, viewplane.xmin, viewplane.xmax, viewplane.ymin, viewplane.ymax, -clipend, clipend);
			else perspective_m4(winmat, viewplane.xmin, viewplane.xmax, viewplane.ymin, viewplane.ymax, clipsta, clipend);

			is_persp = !is_ortho;
		}

		rect = MEM_mallocN(sizex * sizey * sizeof(unsigned char) * 4, "offscreen rect");

		if ((scene->r.mode & R_OSA) == 0) {
			ED_view3d_draw_offscreen(
			        scene, v3d, ar, sizex, sizey, NULL, winmat,
			        draw_bgpic, draw_sky, is_persp,
			        oglrender->ofs, oglrender->fx, &fx_settings, viewname);
			GPU_offscreen_read_pixels(oglrender->ofs, GL_UNSIGNED_BYTE, rect);
		}
		else {
			/* simple accumulation, less hassle than FSAA FBOs */
			static float jit_ofs[32][2];
			float winmat_jitter[4][4];
			int *accum_buffer = MEM_mallocN(sizex * sizey * sizeof(int) * 4, "accum1");
			int i, j;

			BLI_jitter_init(jit_ofs, scene->r.osa);

			/* first sample buffer, also initializes 'rv3d->persmat' */
			ED_view3d_draw_offscreen(
			        scene, v3d, ar, sizex, sizey, NULL, winmat,
			        draw_bgpic, draw_sky, is_persp,
			        oglrender->ofs, oglrender->fx, &fx_settings, viewname);
			GPU_offscreen_read_pixels(oglrender->ofs, GL_UNSIGNED_BYTE, rect);

			for (i = 0; i < sizex * sizey * 4; i++)
				accum_buffer[i] = rect[i];

			/* skip the first sample */
			for (j = 1; j < scene->r.osa; j++) {
				copy_m4_m4(winmat_jitter, winmat);
				window_translate_m4(winmat_jitter, rv3d->persmat,
				                    (jit_ofs[j][0] * 2.0f) / sizex,
				                    (jit_ofs[j][1] * 2.0f) / sizey);

				ED_view3d_draw_offscreen(
				        scene, v3d, ar, sizex, sizey, NULL, winmat_jitter,
				        draw_bgpic, draw_sky, is_persp,
				        oglrender->ofs, oglrender->fx, &fx_settings, viewname);
				GPU_offscreen_read_pixels(oglrender->ofs, GL_UNSIGNED_BYTE, rect);

				for (i = 0; i < sizex * sizey * 4; i++)
					accum_buffer[i] += rect[i];
			}

			for (i = 0; i < sizex * sizey * 4; i++)
				rect[i] = accum_buffer[i] / scene->r.osa;

			MEM_freeN(accum_buffer);
		}

		GPU_offscreen_unbind(oglrender->ofs, true); /* unbind */
	}
	else {
		/* shouldn't suddenly give errors mid-render, but it's possible */
		char err_out[256] = "unknown";
		ImBuf *ibuf_view = ED_view3d_draw_offscreen_imbuf_simple(scene, scene->camera, oglrender->sizex, oglrender->sizey,
		                                                         IB_rect, OB_SOLID, false, true, true,
		                                                         (draw_sky) ? R_ADDSKY : R_ALPHAPREMUL, viewname, err_out);
		camera = scene->camera;

		if (ibuf_view) {
			/* steal rect reference from ibuf */
			rect = (unsigned char *)ibuf_view->rect;
			ibuf_view->mall &= ~IB_rect;

			IMB_freeImBuf(ibuf_view);
		}
		else {
			fprintf(stderr, "%s: failed to get buffer, %s\n", __func__, err_out);
		}
	}

	/* note on color management:
	 *
	 * OpenGL renders into sRGB colors, but render buffers are expected to be
	 * linear. So we convert to linear here, so that the conversion back to bytes can make it
	 * sRGB (or another display space) again, and so that e.g. OpenEXR saving also saves the
	 * correct linear float buffer.
	 */

	if (rect) {
		int profile_to;
		float *rectf = RE_RenderViewGetRectf(rr, oglrender->view_id);
		
		if (BKE_scene_check_color_management_enabled(scene))
			profile_to = IB_PROFILE_LINEAR_RGB;
		else
			profile_to = IB_PROFILE_SRGB;

		/* for the sequencer the trickier conversion already happened above;
		 * also assume OpenGL's space matches the byte buffer color space */
		IMB_buffer_float_from_byte(rectf, rect,
		                           profile_to, IB_PROFILE_SRGB, true,
		                           oglrender->sizex, oglrender->sizey, oglrender->sizex, oglrender->sizex);

		/* rr->rectf is now filled with image data */

		if ((scene->r.stamp & R_STAMP_ALL) && (scene->r.stamp & R_STAMP_DRAW))
			BKE_image_stamp_buf(scene, camera, rect, rectf, rr->rectx, rr->recty, 4);

		MEM_freeN(rect);
	}
}
Example #15
static void screen_opengl_render_apply(OGLRender *oglrender)
{
	Scene *scene = oglrender->scene;
	ARegion *ar = oglrender->ar;
	View3D *v3d = oglrender->v3d;
	RegionView3D *rv3d = oglrender->rv3d;
	RenderResult *rr;
	Object *camera = NULL;
	ImBuf *ibuf;
	void *lock;
	float winmat[4][4];
	int sizex = oglrender->sizex;
	int sizey = oglrender->sizey;
	const short view_context = (v3d != NULL);
	bool draw_bgpic = true;
	bool draw_sky = (scene->r.alphamode == R_ADDSKY);
	unsigned char *rect = NULL;

	rr = RE_AcquireResultRead(oglrender->re);

	if (oglrender->is_sequencer) {
		SeqRenderData context;
		int chanshown = oglrender->sseq ? oglrender->sseq->chanshown : 0;

		context = BKE_sequencer_new_render_data(oglrender->bmain, scene, oglrender->sizex, oglrender->sizey, 100.0f);

		ibuf = BKE_sequencer_give_ibuf(context, CFRA, chanshown);

		if (ibuf) {
			ImBuf *linear_ibuf;

			BLI_assert((oglrender->sizex == ibuf->x) && (oglrender->sizey == ibuf->y));

			linear_ibuf = IMB_dupImBuf(ibuf);
			IMB_freeImBuf(ibuf);

			if (linear_ibuf->rect_float == NULL) {
				/* internally the sequencer works in display space and stores both byte and float buffers in that space.
				 * It is possible that the byte->float conversion didn't happen in the sequencer (e.g. when adding an image
				 * sequence/movie into the sequencer), so there'll be only a byte buffer. Create a float buffer from the
				 * existing byte buffer, making it linear.
				 */

				IMB_float_from_rect(linear_ibuf);
			}
			else {
				/* ensure float buffer is in linear space, not in display space */
				BKE_sequencer_imbuf_from_sequencer_space(scene, linear_ibuf);
			}

			memcpy(rr->rectf, linear_ibuf->rect_float, sizeof(float) * 4 * oglrender->sizex * oglrender->sizey);

			IMB_freeImBuf(linear_ibuf);
		}
	}
	else if (view_context) {
		ED_view3d_draw_offscreen_init(scene, v3d);

		GPU_offscreen_bind(oglrender->ofs); /* bind */

		/* render 3d view */
		if (rv3d->persp == RV3D_CAMOB && v3d->camera) {
			/*int is_ortho = scene->r.mode & R_ORTHO;*/
			camera = v3d->camera;
			RE_GetCameraWindow(oglrender->re, camera, scene->r.cfra, winmat);
			
		}
		else {
			rctf viewplane;
			float clipsta, clipend;

			int is_ortho = ED_view3d_viewplane_get(v3d, rv3d, sizex, sizey, &viewplane, &clipsta, &clipend, NULL);
			if (is_ortho) orthographic_m4(winmat, viewplane.xmin, viewplane.xmax, viewplane.ymin, viewplane.ymax, -clipend, clipend);
			else perspective_m4(winmat, viewplane.xmin, viewplane.xmax, viewplane.ymin, viewplane.ymax, clipsta, clipend);
		}

		rect = MEM_mallocN(sizex * sizey * sizeof(unsigned char) * 4, "offscreen rect");

		if ((scene->r.mode & R_OSA) == 0) {
			ED_view3d_draw_offscreen(scene, v3d, ar, sizex, sizey, NULL, winmat, draw_bgpic, draw_sky);
			GPU_offscreen_read_pixels(oglrender->ofs, GL_UNSIGNED_BYTE, rect);
		}
		else {
			/* simple accumulation, less hassle than FSAA FBOs */
			static float jit_ofs[32][2];
			float winmat_jitter[4][4];
			int *accum_buffer = MEM_mallocN(sizex * sizey * sizeof(int) * 4, "accum1");
			int i, j;

			BLI_jitter_init(jit_ofs[0], scene->r.osa);

			/* first sample buffer, also initializes 'rv3d->persmat' */
			ED_view3d_draw_offscreen(scene, v3d, ar, sizex, sizey, NULL, winmat, draw_bgpic, draw_sky);
			GPU_offscreen_read_pixels(oglrender->ofs, GL_UNSIGNED_BYTE, rect);

			for (i = 0; i < sizex * sizey * 4; i++)
				accum_buffer[i] = rect[i];

			/* skip the first sample */
			for (j = 1; j < scene->r.osa; j++) {
				copy_m4_m4(winmat_jitter, winmat);
				window_translate_m4(winmat_jitter, rv3d->persmat,
				                    (jit_ofs[j][0] * 2.0f) / sizex,
				                    (jit_ofs[j][1] * 2.0f) / sizey);

				ED_view3d_draw_offscreen(scene, v3d, ar, sizex, sizey, NULL, winmat_jitter, draw_bgpic, draw_sky);
				GPU_offscreen_read_pixels(oglrender->ofs, GL_UNSIGNED_BYTE, rect);

				for (i = 0; i < sizex * sizey * 4; i++)
					accum_buffer[i] += rect[i];
			}

			for (i = 0; i < sizex * sizey * 4; i++)
				rect[i] = accum_buffer[i] / scene->r.osa;

			MEM_freeN(accum_buffer);
		}

		GPU_offscreen_unbind(oglrender->ofs); /* unbind */
	}
	else {
		/* shouldn't suddenly give errors mid-render, but it's possible */
		char err_out[256] = "unknown";
		ImBuf *ibuf_view = ED_view3d_draw_offscreen_imbuf_simple(scene, scene->camera, oglrender->sizex, oglrender->sizey,
		                                                         IB_rect, OB_SOLID, FALSE, TRUE,
		                                                         (draw_sky) ? R_ADDSKY: R_ALPHAPREMUL, err_out);
		camera = scene->camera;

		if (ibuf_view) {
			/* steal rect reference from ibuf */
			rect = (unsigned char *)ibuf_view->rect;
			ibuf_view->mall &= ~IB_rect;

			IMB_freeImBuf(ibuf_view);
		}
		else {
			fprintf(stderr, "%s: failed to get buffer, %s\n", __func__, err_out);
		}
	}

	/* note on color management:
	 *
	 * OpenGL renders into sRGB colors, but render buffers are expected to be
	 * linear. So we convert to linear here, so that the conversion back to bytes can make it
	 * sRGB (or another display space) again, and so that e.g. OpenEXR saving also saves the
	 * correct linear float buffer.
	 */

	if (rect) {
		int profile_to;
		
		if (BKE_scene_check_color_management_enabled(scene))
			profile_to = IB_PROFILE_LINEAR_RGB;
		else
			profile_to = IB_PROFILE_SRGB;

		/* for the sequencer the trickier conversion already happened above;
		 * also assume OpenGL's space matches the byte buffer color space */
		IMB_buffer_float_from_byte(rr->rectf, rect,
		                           profile_to, IB_PROFILE_SRGB, true,
		                           oglrender->sizex, oglrender->sizey, oglrender->sizex, oglrender->sizex);
	}

	/* rr->rectf is now filled with image data */

	if ((scene->r.stamp & R_STAMP_ALL) && (scene->r.stamp & R_STAMP_DRAW))
		BKE_stamp_buf(scene, camera, rect, rr->rectf, rr->rectx, rr->recty, 4);

	RE_ReleaseResult(oglrender->re);

	/* update byte from float buffer */
	ibuf = BKE_image_acquire_ibuf(oglrender->ima, &oglrender->iuser, &lock);

	if (ibuf) {
		/* update display buffer */
		if (ibuf->rect == NULL)
			imb_addrectImBuf(ibuf);

		IMB_partial_display_buffer_update(ibuf, rr->rectf, rect, rr->rectx, 0, 0,
		                                  &scene->view_settings, &scene->display_settings,
		                                  0, 0, rr->rectx, rr->recty, true);

		/* write file for animation */
		if (oglrender->write_still) {
			char name[FILE_MAX];
			int ok;

			if (scene->r.im_format.planes == R_IMF_PLANES_BW) {
				IMB_color_to_bw(ibuf);
			}

			BKE_makepicstring(name, scene->r.pic, oglrender->bmain->name, scene->r.cfra, &scene->r.im_format, scene->r.scemode & R_EXTENSION, FALSE);
			ok = BKE_imbuf_write_as(ibuf, name, &scene->r.im_format, TRUE); /* no need to stamp here */
			if (ok) printf("OpenGL Render written to '%s'\n", name);
			else printf("OpenGL Render failed to write '%s'\n", name);
		}
	}
	
	BKE_image_release_ibuf(oglrender->ima, ibuf, lock);

	if (rect)
		MEM_freeN(rect);
}
Example #16
void BKE_movieclip_update_scopes(MovieClip *clip, MovieClipUser *user, MovieClipScopes *scopes)
{
	if (scopes->ok)
		return;

	if (scopes->track_preview) {
		IMB_freeImBuf(scopes->track_preview);
		scopes->track_preview = NULL;
	}

	if (scopes->track_search) {
		IMB_freeImBuf(scopes->track_search);
		scopes->track_search = NULL;
	}

	scopes->marker = NULL;
	scopes->track = NULL;
	scopes->track_locked = TRUE;

	if (clip) {
		MovieTrackingTrack *act_track = BKE_tracking_track_get_active(&clip->tracking);

		if (act_track) {
			MovieTrackingTrack *track = act_track;
			int framenr = BKE_movieclip_remap_scene_to_clip_frame(clip, user->framenr);
			MovieTrackingMarker *marker = BKE_tracking_marker_get(track, framenr);

			scopes->marker = marker;
			scopes->track = track;

			if (marker->flag & MARKER_DISABLED) {
				scopes->track_disabled = TRUE;
			}
			else {
				ImBuf *ibuf = BKE_movieclip_get_ibuf(clip, user);

				scopes->track_disabled = FALSE;

				if (ibuf && (ibuf->rect || ibuf->rect_float)) {
					ImBuf *search_ibuf;
					MovieTrackingMarker undist_marker = *marker;

					if (user->render_flag & MCLIP_PROXY_RENDER_UNDISTORT) {
						int width, height;
						float aspy = 1.0f / clip->tracking.camera.pixel_aspect;

						BKE_movieclip_get_size(clip, user, &width, &height);

						undist_marker.pos[0] *= width;
						undist_marker.pos[1] *= height * aspy;

						BKE_tracking_undistort_v2(&clip->tracking, undist_marker.pos, undist_marker.pos);

						undist_marker.pos[0] /= width;
						undist_marker.pos[1] /= height * aspy;
					}

					search_ibuf = BKE_tracking_get_search_imbuf(ibuf, track, &undist_marker, TRUE, TRUE);

					if (search_ibuf) {
						if (!search_ibuf->rect_float) {
							/* sampling happens in float buffer */
							IMB_float_from_rect(search_ibuf);
						}

						scopes->track_search = search_ibuf;
					}

					scopes->undist_marker = undist_marker;

					scopes->frame_width = ibuf->x;
					scopes->frame_height = ibuf->y;

					scopes->use_track_mask = track->flag & TRACK_PREVIEW_ALPHA;
				}

				IMB_freeImBuf(ibuf);
			}

			if ((track->flag & TRACK_LOCKED) == 0) {
				float pat_min[2], pat_max[2];

				scopes->track_locked = FALSE;

				/* XXX: would work fine with non-transformed patterns, but would likely fail
				 *      with transformed patterns; that will be easier to debug once we
				 *      have real pattern sampling (at least to test) */
				BKE_tracking_marker_pattern_minmax(marker, pat_min, pat_max);

				scopes->slide_scale[0] = pat_max[0] - pat_min[0];
				scopes->slide_scale[1] = pat_max[1] - pat_min[1];
			}
		}
	}

	scopes->framenr = user->framenr;
	scopes->ok = TRUE;
}