// Map x onto an index in [0, n-1]: scale (x - low) by fac (the number of
// indices per unit of x), take the floor, and clamp into the valid range.
// (A usage sketch follows below.)
static ulong hash_index_aux(double low, double fac, ulong n, double x)
{
  const slong i = lfloor((x - low)*fac);
  return i < 0 ? 0 : (n - 1 < (ulong)i ? n - 1 : (ulong)i);
}
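
// A minimal usage sketch with hypothetical values: hashing x into one of
// n = 10 bins covering [0, 1), where fac is the number of bins per unit of x.
// Out-of-range inputs clamp to the first or last bin:
//
//   const ulong n = 10;
//   const double fac = n/(1.0 - 0.0);
//   hash_index_aux(0.0, fac, n, 0.37);  // -> 3
//   hash_index_aux(0.0, fac, n, -5.0);  // -> 0  (clamped)
//   hash_index_aux(0.0, fac, n, 42.0);  // -> 9  (clamped)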
void CqOcclusionSampler::sample(const Sq3DSamplePllgram& samplePllgram,
		const CqVector3D& normal, const CqShadowSampleOptions& sampleOpts,
		TqFloat* outSamps) const
{
	assert(sampleOpts.numChannels() == 1);

	// Unit normal indicating the hemisphere to sample for occlusion.
	CqVector3D N = normal;
	N.Unit();

	const TqFloat sampNumMult = 4.0 * sampleOpts.numSamples() / m_maps.size();

	// Accumulate the total occlusion over all directions.  Here we use an
	// importance sampling approach: we decide how many samples each map
	// should get based on its relative importance as measured by the map
	// weight.
	TqFloat totOcc = 0;
	TqInt totNumSamples = 0;
	TqFloat maxWeight = 0;
	TqViewVec::const_iterator maxWeightMap = m_maps.begin();
	for(TqViewVec::const_iterator map = m_maps.begin(), end = m_maps.end();
			map != end; ++map)
	{
		TqFloat weight = (*map)->weight(N);
		if(weight > 0)
		{
			// Compute the number of samples to use.  Assuming that the shadow
			// maps are spread evenly over the sphere, we have an area of 
			//
			//    4*PI / m_maps.size()
			//
			// steradians per map.  The density of sample points per steradian
			// should be
			//
			//    sampleOpts.numSamples() * weight / PI
			//
			// Therefore the expected number of samples per map is
			TqFloat numSampFlt = sampNumMult*weight;
			// This isn't an integer though, so we take the floor,
			TqInt numSamples = lfloor(numSampFlt);
			// TODO: Investigate performance impact of using RandomFloat() here.
			if(m_random.RandomFloat() < numSampFlt - numSamples)
			{
				// And increment with a probability equal to the extra
				// fraction of samples that the current map should have.
				// (This is the stochastic rounding trick sketched after
				// this function.)
				++numSamples;
			}
			if(numSamples > 0)
			{
				// Compute amount of occlusion from the current view.
				TqFloat occ = 0;
				(*map)->sample(samplePllgram, sampleOpts, numSamples, &occ);
				// Accumulate into total occlusion and weight.
				totOcc += occ*numSamples;
				totNumSamples += numSamples;
			}
			if(weight > maxWeight)
			{
				maxWeight = weight;
				maxWeightMap = map;
			}
		}
	}

	// The algorithm above sometimes results in no samples being computed
	// when the total sample number is low.  To keep very small sample
	// budgets useful, fall back to taking a single sample from the most
	// highly weighted map if none have been taken.
	if(totNumSamples == 0 && maxWeight > 0)
	{
		TqFloat occ = 0;
		(*maxWeightMap)->sample(samplePllgram, sampleOpts, 1, &occ);
		totOcc += occ;
		totNumSamples += 1;
	}

	// Normalize the sample.  Guard against division by zero for the case
	// where every map had zero weight and no samples were taken.
	*outSamps = totNumSamples > 0 ? totOcc / totNumSamples : 0;
}
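
// The floor-plus-random-increment logic in sample() above is a stochastic
// rounding trick: round x down with probability 1 - frac(x) and up with
// probability frac(x), so the result is an integer whose expected value is
// exactly x and no samples are lost on average.  A minimal standalone sketch
// of the same idea (stochasticRound is a hypothetical helper, with the
// uniform [0,1) random number passed in explicitly):
static TqInt stochasticRound(TqFloat x, TqFloat u)
{
	TqInt n = lfloor(x);
	// Round up with probability equal to the fractional part of x,
	// so that E[stochasticRound(x, u)] == x.
	if(u < x - n)
		++n;
	return n;
}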
void CqImageBuffer::AddMPG( boost::shared_ptr<CqMicroPolygon>& pmpgNew )
{
	CqRenderer* renderContext = QGetRenderContext();
	CqBound B = pmpgNew->GetBound();

	// Expand the micropolygon bound for DoF if necessary.
	if(renderContext->UsingDepthOfField())
	{
		// Get the maximum CoC multiplier over the micropolygon's depth range.
		const CqVector2D maxCoC = max(
			renderContext->GetCircleOfConfusion(B.vecMin().z()),
			renderContext->GetCircleOfConfusion(B.vecMax().z())
		);
		// Expand the bound by the CoC radius
		B.vecMin() -= vectorCast<CqVector3D>(maxCoC);
		B.vecMax() += vectorCast<CqVector3D>(maxCoC);
	}

	// Discard when outside the crop window.
	if ( B.vecMax().x() < renderContext->cropWindowXMin() - m_optCache.xFiltSize / 2.0f ||
	     B.vecMax().y() < renderContext->cropWindowYMin() - m_optCache.yFiltSize / 2.0f ||
	     B.vecMin().x() > renderContext->cropWindowXMax() + m_optCache.xFiltSize / 2.0f ||
	     B.vecMin().y() > renderContext->cropWindowYMax() + m_optCache.yFiltSize / 2.0f )
	{
		return;
	}

	////////// Dump the micro polygon into a dump file //////////
#if ENABLE_MPDUMP
	if(m_mpdump.IsOpen())
		m_mpdump.dump(*pmpgNew);
#endif
	/////////////////////////////////////////////////////////////


	// Find the range of buckets touched by the micropoly bound, after
	// expanding it by the filter halfwidth.  (A single-axis sketch of this
	// bound-to-bucket mapping follows after this function.)

	B.vecMin().x( B.vecMin().x() - (lfloor(m_optCache.xFiltSize / 2.0f)) );
	B.vecMin().y( B.vecMin().y() - (lfloor(m_optCache.yFiltSize / 2.0f)) );
	B.vecMax().x( B.vecMax().x() + (lfloor(m_optCache.xFiltSize / 2.0f)) );
	B.vecMax().y( B.vecMax().y() + (lfloor(m_optCache.yFiltSize / 2.0f)) );

	TqInt iXBa = static_cast<TqInt>( B.vecMin().x() / m_optCache.xBucketSize );
	TqInt iYBa = static_cast<TqInt>( B.vecMin().y() / m_optCache.yBucketSize );
	TqInt iXBb = static_cast<TqInt>( B.vecMax().x() / m_optCache.xBucketSize );
	TqInt iYBb = static_cast<TqInt>( B.vecMax().y() / m_optCache.yBucketSize );

	if ( ( iXBb < m_bucketRegion.xMin() ) || ( iYBb < m_bucketRegion.yMin() ) ||
	        ( iXBa >= m_bucketRegion.xMax() ) || ( iYBa >= m_bucketRegion.yMax() ) )
	{
		return ;
	}

	// Clamp to the valid bucket region -- otherwise precision problems can
	// sometimes produce out-of-range indices and crash.
	if ( iXBa < m_bucketRegion.xMin() )  iXBa = m_bucketRegion.xMin();
	if ( iYBa < m_bucketRegion.yMin() )  iYBa = m_bucketRegion.yMin();
	if ( iXBb >= m_bucketRegion.xMax() )  iXBb = m_bucketRegion.xMax() - 1;
	if ( iYBb >= m_bucketRegion.yMax() )  iYBb = m_bucketRegion.yMax() - 1;

	// Add the MP to all of the buckets that it touches.
	for ( TqInt i = iXBa; i <= iXBb; i++ )
	{
		for ( TqInt j = iYBa; j <= iYBb; j++ )
		{
			CqBucket* bucket = &Bucket( i, j );
			// Only add the MPG if the bucket hasn't been processed yet.
			// \note It is possible for this to happen validly: if a primitive
			// is occlusion culled in one bucket but not in a subsequent one,
			// its MPGs can leak back into the earlier bucket when the later
			// bucket is processed.  This shouldn't be a problem, since the
			// occlusion culling means those MPGs wouldn't be rendered in the
			// earlier bucket anyway.
			if ( !bucket->IsProcessed() )
			{
				bucket->AddMP( pmpgNew );
			}
		}
	}
}
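
// The bucket-range computation in AddMPG() above amounts to integer division
// of the filter-expanded raster bound by the bucket size, followed by
// clamping into the valid bucket region.  A minimal sketch of the same logic
// for a single axis (bucketRangeForAxis is a hypothetical helper; bMin/bMax
// mirror the m_bucketRegion limits, with bMax exclusive):
static void bucketRangeForAxis(TqFloat boundMin, TqFloat boundMax,
		TqInt bucketSize, TqInt bMin, TqInt bMax,
		TqInt& first, TqInt& last)
{
	first = static_cast<TqInt>(boundMin / bucketSize);
	last = static_cast<TqInt>(boundMax / bucketSize);
	// Clamp into [bMin, bMax) so that subsequent bucket indexing can't
	// run off the grid, even with floating point precision problems.
	if(first < bMin)
		first = bMin;
	if(last >= bMax)
		last = bMax - 1;
}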