Example 1
		/** \brief Compute occlusion from the current view direction to the
		 * given sample region.
		 *
		 * \param sampleQuad - quadrilateral region over which to sample the map
		 * \param sampleOpts - set of sampling options
		 * \param outSamps[0] - Return parameter; amount of occlusion over the
		 *                      sample region in the viewing direction for this map.
		 */
		void sample(const Sq3DSampleQuad& sampleQuad,
				const CqShadowSampleOptions& sampleOpts,
				TqFloat* outSamps) const
		{
			// Get depths of sample positions.
			Sq3DSampleQuad quadLightCoord = sampleQuad;
			quadLightCoord.transform(m_currToLight);

			// Get texture coordinates of sample positions.
			Sq3DSampleQuad texQuad3D = sampleQuad;
			texQuad3D.transform(m_currToRaster);
			// Copy into (x,y) coordinates of texQuad and scale by the filter width.
			SqSampleQuad texQuad = texQuad3D;
			texQuad.scaleWidth(sampleOpts.sWidth(), sampleOpts.tWidth());

			// Get the EWA filter weight functor.  We use a relatively low edge cutoff
			// of 2, since we want to avoid taking samples which don't contribute much
			// to the average.  This problem would be a relative non-issue if we did
			// proper importance sampling.
			//
			/// \todo Investigate proper importance sampling to reduce the variance in
			/// shadow sampling?
			CqEwaFilterFactory ewaFactory(texQuad, m_pixels.width(),
					m_pixels.height(), sampleOpts.sBlur(), sampleOpts.tBlur(), 2);
			CqEwaFilter ewaWeights = ewaFactory.createFilter();

			/** \todo Optimization: Cull the query if it's outside the [min,max] depth
			 * range of the support.  Being able to determine the range from the tiles
			 * covered by the filter support will be a big advantage.
			 */

			SqFilterSupport support = ewaWeights.support();
			if(support.intersectsRange(0, m_pixels.width(), 0, m_pixels.height()))
			{
				if(sampleOpts.depthApprox() == DApprox_Constant)
				{
					// Functor which approximates the surface depth using a constant.
					CqConstDepthApprox depthFunc(quadLightCoord.center().z());
					applyPCF(m_pixels, sampleOpts, support, ewaWeights, depthFunc, outSamps);
				}
				else
				{
					// Get a functor which approximates the surface depth across the filter
					// support with a linear approximation.  This deduced depth will be
					// compared with the depths from the stored texture buffer.
					quadLightCoord.copy2DCoords(texQuad);
					CqSampleQuadDepthApprox depthFunc(quadLightCoord, m_pixels.width(),
							m_pixels.height());
					applyPCF(m_pixels, sampleOpts, support, ewaWeights, depthFunc, outSamps);
				}
			}
			else
			{
				// If the filter support lies wholly outside the texture, return
				// fully visible == 0.
				*outSamps = 0;
			}
		}
		/** \brief Compute occlusion from the current view direction to the
		 * given sample region.
		 *
		 * \param sampleRegion - parallelogram region over which to sample the map
		 * \param sampleOpts - set of sampling options 
		 * \param numSamples - number of samples to take for the region.
		 * \param outSamps[0] - Return parameter; amount of occlusion over the
		 *                      sample region in the viewing direction for this map.
		 */
		void sample(const Sq3DSamplePllgram& sampleRegion,
				const CqShadowSampleOptions& sampleOpts,
				const TqInt numSamples, TqFloat* outSamps)
		{
			// filter weights
			CqConstFilter filterWeights;
			// Use constant depth approximation for the surface for maximum
			// sampling speed.  We use the depth from the camera to the centre
			// of the sample region.
			CqConstDepthApprox depthFunc((m_currToLight*sampleRegion.c).z());
			// Determine rough filter support.  This results in a
			// texture-aligned box, so doesn't do proper anisotropic filtering.
			// For occlusion this isn't visible anyway because of the large
			// amount of averaging.  We also want the filter setup to be as
			// fast as possible.
//			CqVector3D side1 = m_currToRasterVec*sampleRegion.s1;
//			CqVector3D side2 = m_currToRasterVec*sampleRegion.s2;
//			CqVector3D center = m_currToRaster*sampleRegion.c;
//			TqFloat sWidthOn2 = max(side1.x(), side2.x())*m_pixels.width()/2;
//			TqFloat tWidthOn2 = max(side1.y(), side2.y())*m_pixels.height()/2;

			// TODO: Fix the above calculation so that the width is actually
			// taken into account properly.
			CqVector3D center = m_currToRaster*sampleRegion.c;
			TqFloat sWidthOn2 = 0.5*(sampleOpts.sBlur()*m_pixels.width());
			TqFloat tWidthOn2 = 0.5*(sampleOpts.tBlur()*m_pixels.height());
			SqFilterSupport support(
					lround(center.x()-sWidthOn2), lround(center.x()+sWidthOn2) + 1,
					lround(center.y()-tWidthOn2), lround(center.y()+tWidthOn2) + 1);
			// percentage closer accumulator
			CqPcfAccum<CqConstFilter, CqConstDepthApprox> accumulator(
					filterWeights, depthFunc, sampleOpts.startChannel(),
					sampleOpts.biasLow(), sampleOpts.biasHigh(), outSamps);
			// accumulate occlusion over the filter support.
			filterTextureNowrapStochastic(accumulator, m_pixels, support, numSamples);
		}
void CqOcclusionSampler::sample(const Sq3DSamplePllgram& samplePllgram,
		const CqVector3D& normal, const CqShadowSampleOptions& sampleOpts,
		TqFloat* outSamps) const
{
	assert(sampleOpts.numChannels() == 1);

	// Unit normal indicating the hemisphere to sample for occlusion.
	CqVector3D N = normal;
	N.Unit();

	const TqFloat sampNumMult = 4.0 * sampleOpts.numSamples() / m_maps.size();

	// Accumulate the total occlusion over all directions.  Here we use an
	// importance sampling approach: we decide how many samples each map should
	// have based on its relative importance as measured by the map weight.
	TqFloat totOcc = 0;
	TqInt totNumSamples = 0;
	TqFloat maxWeight = 0;
	TqViewVec::const_iterator maxWeightMap = m_maps.begin();
	for(TqViewVec::const_iterator map = m_maps.begin(), end = m_maps.end();
			map != end; ++map)
	{
		TqFloat weight = (*map)->weight(N);
		if(weight > 0)
		{
			// Compute the number of samples to use.  Assuming that the shadow
			// maps are spread evenly over the sphere, we have an area of 
			//
			//    4*PI / m_maps.size()
			//
			// steradians per map.  The density of sample points per steradian
			// should be
			//
			//    sampleOpts.numSamples() * weight / PI
			//
			// Therefore the expected number of samples per map is
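			//
			//    (4*PI/m_maps.size()) * (sampleOpts.numSamples()*weight/PI)
			//        = 4*sampleOpts.numSamples()*weight/m_maps.size()
			//        = sampNumMult*weight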
			TqFloat numSampFlt = sampNumMult*weight;
			// This isn't an integer though, so we take the floor,
			TqInt numSamples = lfloor(numSampFlt);
			// TODO: Investigate performance impact of using RandomFloat() here.
			if(m_random.RandomFloat() < numSampFlt - numSamples)
			{
				// And increment with a probability equal to the extra fraction
				// of samples that the current map should have.
				++numSamples;
			}
			if(numSamples > 0)
			{
				// Compute amount of occlusion from the current view.
				TqFloat occ = 0;
				(*map)->sample(samplePllgram, sampleOpts, numSamples, &occ);
				// Accumulate into total occlusion and weight.
				totOcc += occ*numSamples;
				totNumSamples += numSamples;
			}
			if(weight > maxWeight)
			{
				maxWeight = weight;
				maxWeightMap = map;
			}
		}
	}

	// The algorithm above sometimes results in no samples being computed for
	// low total sample numbers.  Here we attempt to allow very small numbers
	// of samples to be useful by sampling the most highly weighted map if no
	// samples have been taken
	if(totNumSamples == 0 && maxWeight > 0)
	{
		TqFloat occ = 0;
		(*maxWeightMap)->sample(samplePllgram, sampleOpts, 1, &occ);
		totOcc += occ;
		totNumSamples += 1;
	}

	// Normalize the sample.  If every map weight was zero, no samples were
	// taken; report zero occlusion (fully visible) rather than dividing by zero.
	*outSamps = totNumSamples > 0 ? totOcc/totNumSamples : 0;
}
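
The per-map sample counts above are chosen by stochastic rounding: take the floor of the expected count, then add one more sample with probability equal to the remaining fraction, so the integer count has the right expected value. Below is a minimal self-contained sketch of that idea; the helper name stochasticRound and the use of std::mt19937 are illustrative assumptions, not part of the renderer's API.

#include <cmath>
#include <random>

// Round x (assumed >= 0) to an integer whose expected value equals x:
// floor(x), plus one with probability equal to the fractional part of x.
inline int stochasticRound(float x, std::mt19937& rng)
{
	std::uniform_real_distribution<float> uniform01(0.0f, 1.0f);
	int n = static_cast<int>(std::floor(x));
	if(uniform01(rng) < x - n)
		++n;
	return n;
}

// Usage analogous to the loop in CqOcclusionSampler::sample above:
//   TqInt numSamples = stochasticRound(sampNumMult*weight, rng);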