Example #1
void nuke_ld_3de4_base::_request(int x,int y,int r,int t,DD::Image::ChannelMask channels,int count)
	{
// Since for image processing we *fetch* at pixel positions,
// the inverse mapping is used (as often mentioned).
	DD::Image::Box box = bounds(_knob_direction == distort ? undistort : distort,x,y,r,t);
// The original Weta node intersects with input0's box, which makes sense,
// so we do the same here: we should not request what input0 cannot supply.
	box.intersect(input0().info());
	input0().request(box.x(),box.y(),box.r(),box.t(),channels,count);
	}
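
The bounds() helper called above is not part of this snippet. Below is a minimal sketch of how such a bounding box could be computed, written as if it were another member of nuke_ld_3de4_base and reusing _ldm, format() and the fov mapping helpers that appear in Example #3. The function name bounds_sketch, the dir parameter and the per-edge sample count are illustrative assumptions, not the actual ldpk implementation; the idea is simply to map sample points along the border of the requested region and merge the mapped positions into a box.

// Hypothetical helper for illustration only, not the ldpk implementation.
DD::Image::Box nuke_ld_3de4_base::bounds_sketch(int dir,int x,int y,int r,int t)
	{
	int w = format().w();
	int h = format().h();
	double inv_w = 1.0 / w;
	double inv_h = 1.0 / h;
	DD::Image::Box box;
	bool first = true;
// Arbitrary number of samples per edge; denser sampling gives a tighter box.
	const int n = 32;
	for(int i = 0;i <= n;++i)
		{
		double fx = x + (r - x) * double(i) / n;
		double fy = y + (t - y) * double(i) / n;
// Sample the two horizontal edges (bottom, top) and the two vertical edges (left, right).
		double ex[4] = { fx, fx, double(x), double(r) };
		double ey[4] = { double(y), double(t), fy, fy };
		for(int k = 0;k < 4;++k)
			{
// Same unit-interval mapping and (half,half) shift as in engine().
			double x_s = (0.5 + ex[k]) * inv_w;
			double y_s = (0.5 + ey[k]) * inv_h;
			double x_d,y_d;
			if(dir == distort)
				{ _ldm->distort(map_in_fov_x(x_s),map_in_fov_y(y_s),x_d,y_d); }
			else
				{ _ldm->undistort(map_in_fov_x(x_s),map_in_fov_y(y_s),x_d,y_d); }
			double px = map_out_fov_x(x_d) * w;
			double py = map_out_fov_y(y_d) * h;
			if(first)
				{ box = DD::Image::Box(int(floor(px)),int(floor(py)),int(ceil(px)),int(ceil(py))); first = false; }
			else
				{ box.merge(int(floor(px)),int(floor(py)),int(ceil(px)),int(ceil(py))); }
			}
		}
// A safety margin for the reconstruction filter could be added here,
// analogous to the pad(3) in engine().
	return box;
	}
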
Example #2
File: Convert.cpp Project: mor-vfx/cortex
Imath::Box2i convert( const DD::Image::Box &from )
{
    return Imath::Box2i( Imath::V2i( from.x(), from.y() ), Imath::V2i( from.r(), from.t() ) );
}
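
The opposite conversion is symmetric. Here is a minimal sketch, assuming DD::Image::Box has an (x, y, r, t) constructor; it is an illustration only, not necessarily how cortex implements it. Note that DD::Image::Box stores r and t as exclusive upper bounds, whereas an Imath::Box2i max is commonly treated as inclusive, so a caller may need to account for that one-pixel difference in either direction.

// Hedged sketch of the reverse conversion; illustrative only.
DD::Image::Box convert( const Imath::Box2i &from )
{
    return DD::Image::Box( from.min.x, from.min.y, from.max.x, from.max.y );
}
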
Example #3
void nuke_ld_3de4_base::engine(int y,int x,int r,DD::Image::ChannelMask channels,DD::Image::Row& outrow)
	{
	if(!input(0))
		{ return; }
	int w = format().w();
	int h = format().h();
	if((w <= 0) || (h <= 0))
		{ return; }
	double inv_w = 1.0 / w;
	double inv_h = 1.0 / h;
// We construct (x_s,y_s) in a way, so that the image area is mapped to the unit interval [0,1]^2,
// which is required by our 3DE4 lens distortion plugin class. Nuke's coordinates are pixel based,
// (0,0) is the left lower corner of the left lower pixel, while (1,1) is the right upper corner
// of that pixel. The center of any pixel (ix,iy) is (ix+0.5,iy+0.5), so we add 0.5 here.
	double y_s = (0.5 + y) * inv_h;
// Determine bounding box and write down results.
	vector<ldpk::vec2d> pos;
// Reserve up front so that push_back does not reallocate.
	pos.reserve(r - x);
// Box for "lock the tile".
	DD::Image::Box box;
	for(int i = x;i < r;++i)
		{
// Don't forget the (half,half) shift!
		double x_s = (0.5 + i) * inv_w;
		double x_d,y_d;
// Image processing, reversed mapping. Weave in 3DE4's field of view.
		if(_knob_direction == undistort)
			{ _ldm->distort(map_in_fov_x(x_s),map_in_fov_y(y_s),x_d,y_d); }
		else
			{ _ldm->undistort(map_in_fov_x(x_s),map_in_fov_y(y_s),x_d,y_d); }
// The result already contains the (half,half) shift. Reformulate in Nuke's coordinates. Weave "out" 3DE4's field of view.
		double px = map_out_fov_x(x_d) * w;
		double py = map_out_fov_y(y_d) * h;
// Build the box for "lock the tile..."
		if(i == x)
			{ box = DD::Image::Box(int(floor(px)),int(floor(py)),int(ceil(px)),int(ceil(py))); }
		else
			{ box.merge(int(floor(px)),int(floor(py)),int(ceil(px)),int(ceil(py))); }
// We store the results.
		pos.push_back(ldpk::vec2d(px,py));
		}
// Add margin for reconstruction filter. Cubic will need two pixels more, others maybe three.
	box.pad(3);

// Transfer the stored result. We set pixels x to r in outrow.
// Begin extension by David Cattermole and Ben Dickson, RSP. Thank you!
// Minor changes added by SDV for version 1.9.1.
	if(_knob_output_mode == OUTPUT_STMAP)
		{
// Output source pixel coordinates in format compatible with STMap
		double inv_w0 = 1.0 / input0().format().width();
		double inv_h0 = 1.0 / input0().format().height();
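// In an STMap the red channel carries s = source_x / input_width and the green
// channel carries t = source_y / input_height, so a downstream STMap node can
// fetch input0 at (s * width, t * height) to apply the warp.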
		for(int i = 0;i < pos.size();++i)
			{
			foreach(z,channels)
				{
				if(z == DD::Image::Chan_Red)
					{ outrow.writable(z)[i + x] = float(pos[i][0] * inv_w0); }
				else if(z == DD::Image::Chan_Green)
					{ outrow.writable(z)[i + x] = float(pos[i][1] * inv_h0); }
				else
					{ outrow.writable(z)[i + x] = 0.0; }
				}
			}
		}