Example #1
0
			ReturnType operator()( T *dataContainer )
	{
		assert( dataContainer );

		// The channel data that will be quantised and packed into the target buffer.
		const typename T::ValueType &channel = dataContainer->readable();

		ScaledDataConversion<typename T::ValueType::value_type, float> toFloat;

		typedef boost::multi_array_ref< const typename T::ValueType::value_type, 2 > SourceArray2D;
		typedef boost::multi_array_ref< unsigned int, 2 > TargetArray2D;

		// Cache both windows up front so the tight loop below does no pointer dereferencing.
		const Box2i displayWindow = m_image->getDisplayWindow();
		const Box2i dataWindow = m_image->getDataWindow();

		// 2D views over the source channel (data window sized) and the packed target (display window sized).
		const SourceArray2D source( &channel[0], extents[ dataWindow.size().y + 1 ][ dataWindow.size().x + 1 ] );
		TargetArray2D target( &m_imageBuffer[0], extents[ displayWindow.size().y + 1 ][ displayWindow.size().x + 1 ] );

		// Only the overlap of the requested window with both image windows gets copied.
		const Box2i region = boxIntersection( m_dataWindow, boxIntersection( displayWindow, dataWindow ) );

		// Offset of the copy region's origin within the display window.
		const unsigned int offsetX = region.min.x - displayWindow.min.x;
		const unsigned int offsetY = region.min.y - displayWindow.min.y;

		for ( int y = region.min.y; y <= region.max.y; y++ )
		{
			for ( int x = region.min.x; x <= region.max.x; x++ )
			{
				// Normalise the sample, quantise to 10 bits with a clamp at 1023,
				// then OR it into the target word at this channel's bit offset.
				const float normalised = toFloat( source[ y - dataWindow.min.y ][ x - dataWindow.min.x ] );
				const unsigned int quantised = std::min( (unsigned int)1023, (unsigned int)( normalised * 1023 ) );
				target[ ( y - region.min.y ) + offsetY ][ ( x - region.min.x ) + offsetX ] |= quantised << m_bitShift;
			}
		}
	};
Example #2
0
void LensDistortOp::begin( const CompoundObject * operands )
{
	// Get the lens model parameters.
	IECore::CompoundObjectPtr lensModelParams( runTimeCast<CompoundObject>( lensParameter()->getValue() ) );
	
	// Load the lens object.
	m_lensModel = LensModel::create( lensModelParams );
	m_lensModel->validate();
	
	// Get the distortion mode.
	m_mode = m_modeParameter->getNumericValue();
	
	// Get our image information.
	assert( runTimeCast< ImagePrimitive >(inputParameter()->getValue()) );
	ImagePrimitive *inputImage = static_cast<ImagePrimitive *>( inputParameter()->getValue() );
	
	m_imageSize = inputImage->getDisplayWindow().size();
	m_imageDataWindow = inputImage->getDataWindow();
}
	ReturnType operator()( T * dataContainer )
	{
		assert( dataContainer );

		// Source channel data, converted to float and scattered into the
		// interleaved RGB target buffer at this channel's offset.
		const typename T::ValueType &data = dataContainer->readable();
		ScaledDataConversion<typename T::ValueType::value_type, float> converter;

		typedef boost::multi_array_ref< const typename T::ValueType::value_type, 2 > SourceArray2D;
		typedef boost::multi_array_ref< Color3f, 2 > TargetArray2D;

		// Cache the windows so the tight loop below doesn't repeatedly dereference m_image.
		const Box2i displayWindow = m_image->getDisplayWindow();
		const Box2i dataWindow = m_image->getDataWindow();

		const SourceArray2D sourceData( &data[0], extents[ dataWindow.size().y + 1 ][ dataWindow.size().x + 1 ] );
		TargetArray2D targetData( &m_rgbData->writable()[0], extents[ displayWindow.size().y + 1 ][ displayWindow.size().x + 1 ] );

		// Only the overlap of the requested window with both image windows gets copied.
		const Box2i copyRegion = boxIntersection( m_dataWindow, boxIntersection( displayWindow, dataWindow ) );

		for ( int y = copyRegion.min.y; y <= copyRegion.max.y ; y++ )
		{
			for ( int x = copyRegion.min.x; x <= copyRegion.max.x ; x++ )
			{
				// The target index is the pixel's position relative to the display
				// window. The previous form added copyRegion.min as well, shifting
				// the write (and potentially overrunning targetData) whenever the
				// copy region didn't start at the display window origin.
				targetData[ y - displayWindow.min.y ][ x - displayWindow.min.x ][ m_channelOffset ]
					= converter( sourceData[ y - dataWindow.min.y ][ x - dataWindow.min.x ] );
			}
		}
	};
ObjectPtr EnvMapSHProjector::doOperation( const CompoundObject *operands )
{
	// Projects a lat-long environment map onto spherical harmonics, returning
	// the SH coefficients as Color3f data.
	ImagePrimitive * image = runTimeCast< ImagePrimitive, Object >( m_envMapParameter->getValue() );
	assert( image );

	if ( image->getDisplayWindow() != image->getDataWindow() )
	{
		throw Exception( "EnvMapSHProjector only works with images that display and data windows match." );
	}

	unsigned bands = m_bandsParameter->getNumericValue();
	unsigned samples = m_samplesParameter->getNumericValue();
	bool rightHandSystem = m_rightHandSystemParameter->getTypedValue();
	bool applyFilter = m_applyFilterParameter->getTypedValue();
	Imath::M44f orientation = m_orientationParameter->getTypedValue();

	// Windows are inclusive boxes, hence the +1 for the pixel counts.
	int imgWidth = image->getDataWindow().size().x + 1;
	int imgHeight = image->getDataWindow().size().y + 1;

	// create SH projector
	IECore::SHProjectorf projector( samples );
	projector.computeSamples( bands );

	ConstFloatVectorDataPtr redData = image->getChannel< float >( "R" );
	ConstFloatVectorDataPtr greenData = image->getChannel< float >( "G" );
	ConstFloatVectorDataPtr blueData = image->getChannel< float >( "B" );

	if ( !redData || !greenData || !blueData )
	{
		throw Exception( "EnvMap does not have the three colour channels (R,G,B)!" );
	}

	const std::vector<float> &chR = redData->readable();
	const std::vector<float> &chG = greenData->readable();
	const std::vector<float> &chB = blueData->readable();

	// rotate coordinates along X axis so that the image maps Y coordinates to the vertical direction instead of Z.
	Imath::M44f rotX90 = Imath::Eulerf( M_PI * 0.5, 0, 0 ).toMatrix44();

	// \todo: check if the order of multiplication is what we expect...
	orientation = orientation * rotX90;

	EuclideanToSphericalTransform< Imath::V3f, Imath::V2f > euc2sph;
	std::vector< Imath::V3f >::const_iterator cit = projector.euclideanCoordinates().begin();

	SHColor3f sh( bands );
	unsigned int i;
	unsigned actualSamples = projector.euclideanCoordinates().size();
	Imath::V3f systemConversion(1);
	if ( !rightHandSystem )
	{
		// Flip Z to convert between left- and right-handed coordinate systems.
		systemConversion[2] = -systemConversion[2];
	}

	// image to SH: accumulate each sample direction's pixel colour into the coefficients.
	for ( i = 0; i < actualSamples; i++, cit++ )
	{
		// Map the (reoriented) sample direction to spherical coordinates and
		// then to a pixel in the lat-long map.
		Imath::V2f phiTheta = euc2sph.transform( ((*cit) * systemConversion) * orientation );
		int ix = (int)(phiTheta.x * (float)imgWidth / ( M_PI * 2 ));
		int iy = (int)(phiTheta.y * (float)imgHeight /  M_PI );
		// Clamp to the last valid pixel. The previous `> imgWidth`/`> imgHeight`
		// tests allowed ix == imgWidth and iy == imgHeight, which indexed one
		// past the row end / past the end of the channel buffers.
		if ( ix >= imgWidth )
			ix = imgWidth - 1;
		if ( iy >= imgHeight )
			iy = imgHeight - 1;
		int offset = iy * imgWidth + ix;
		projector( i, Imath::Color3f( chR[ offset ], chG[ offset ], chB[ offset ] ), sh );
	}

	// filter SH
	if ( applyFilter )
	{
		// use author's suggestion for window size.
		IECore::windowingFilter( sh, 2*sh.bands() );
	}

	Color3fVectorDataPtr result = new Color3fVectorData( sh.coefficients() );
	return result;
}
Example #5
0
// Prepares the op for execution: builds and validates the lens model from its
// parameters, derives the distorted data window, and caches the distorted
// source position of every output pixel for use by the warp() method.
void LensDistortOp::begin( const CompoundObject * operands )
{
	// Get the lens model parameters.
	IECore::CompoundObjectPtr lensModelParams( runTimeCast<CompoundObject>( lensParameter()->getValue() ) );
	
	// Load the lens object.
	m_lensModel = LensModel::create( lensModelParams );
	m_lensModel->validate();
	
	// Get the distortion mode.
	m_mode = m_modeParameter->getNumericValue();
	
	// Get our image information.
	assert( runTimeCast< ImagePrimitive >(inputParameter()->getValue()) );
	ImagePrimitive *inputImage = static_cast<ImagePrimitive *>( inputParameter()->getValue() );
	
	Imath::Box2i dataWindow( inputImage->getDataWindow() );
	Imath::Box2i displayWindow( inputImage->getDisplayWindow() );
	// Display window dimensions (the boxes are inclusive, hence the +1) and
	// origin, kept as doubles for the UV-space maths below.
	double displayWH[2] = { static_cast<double>( displayWindow.size().x + 1 ), static_cast<double>( displayWindow.size().y + 1 ) };
	double displayOrigin[2] = { static_cast<double>( displayWindow.min[0] ), static_cast<double>( displayWindow.min[1] ) };
	
	// Get the distorted window.
	// As the LensModel::bounds() method requires that the display window has its origin at (0,0) in the bottom left of the image and the IECore::ImagePrimitive has its origin in the top left,
	// convert to the correct image space (flipping Y) and offset by the display window's origin if it is non-zero.
	Imath::Box2i distortionSpaceBox(
		Imath::V2i( dataWindow.min[0] - displayWindow.min[0], displayWindow.size().y - ( dataWindow.max[1] - displayWindow.min[1] ) ),
		Imath::V2i( dataWindow.max[0] - displayWindow.min[0], displayWindow.size().y - ( dataWindow.min[1] - displayWindow.min[1] ) )
	);

	// Calculate the distorted data window.
	Imath::Box2i distortedWindow = m_lensModel->bounds( m_mode, distortionSpaceBox, ( displayWindow.size().x + 1 ), ( displayWindow.size().y + 1 ) );

	// Convert the distorted data window back to the same image space as IECore::ImagePrimitive
	// (top-left origin, offset by the display window's origin; Y is flipped back).
	m_distortedDataWindow =  Imath::Box2i( 
		Imath::V2i( distortedWindow.min[0] + displayWindow.min[0], ( displayWindow.size().y - distortedWindow.max[1] ) + displayWindow.min[1] ),
		Imath::V2i( distortedWindow.max[0] + displayWindow.min[0], ( displayWindow.size().y - distortedWindow.min[1] ) + displayWindow.min[1] )
	);
	
	// Compute a 2D cache of the warped points for use in the warp() method.
	IECore::FloatVectorDataPtr cachePtr = new IECore::FloatVectorData;
	std::vector<float> &cache( cachePtr->writable() );
	cache.resize( ( m_distortedDataWindow.size().x + 1 ) * ( m_distortedDataWindow.size().y + 1 ) * 2 ); // We interleave the X and Y vector components within the cache.

	// Iterate Y from max down to min: in the bottom-left-origin space used here,
	// that lays cache entries out in the ImagePrimitive's top-left row order.
	for( int y = distortedWindow.max.y, pixelIndex = 0; y >= distortedWindow.min.y; --y )
	{
		for( int x = distortedWindow.min.x; x <= distortedWindow.max.x; ++x )
		{
			// Convert to UV space with the origin in the bottom left.	
			Imath::V2f p( Imath::V2f( x, y ) );
			Imath::V2d uv( p[0] / displayWH[0], p[1] / displayWH[1] );

			// Get the distorted uv coordinate.
			Imath::V2d duv( m_mode == kDistort ? m_lensModel->distort( uv ) : m_lensModel->undistort( uv ) );

			// Transform it to image space, flipping Y back to a top-left origin.
			// NOTE(review): the "- 1." is applied to the height term only (not the
			// width) — presumably to land on the last row's pixel centre after the
			// flip; confirm against warp()'s sampling convention.
			p = Imath::V2f(
				duv[0] * displayWH[0] + displayOrigin[0], ( ( displayWH[1] - 1. ) - ( duv[1] * displayWH[1] ) ) + displayOrigin[1] 
			);

			cache[pixelIndex++] = p[0];
			cache[pixelIndex++] = p[1];
		}
	}

	// Hold a reference so the cache outlives begin() and is available to warp().
	m_cachePtr = cachePtr;
}