Example #1
void GrGLProgramBuilder::emitAndInstallProc(const GrFragmentProcessor& fp,
                                            int index,
                                            const char* outColor,
                                            const char* inColor) {
    GrGLInstalledFragProc* ifp = new GrGLInstalledFragProc;

    ifp->fGLProc.reset(fp.createGLSLInstance());

    SkSTArray<4, GrGLSLTextureSampler> samplers(fp.numTextures());
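    // (SkSTArray<4, GrGLSLTextureSampler> keeps up to four samplers in inline storage;
    // the constructor argument only reserves capacity for fp.numTextures() entries.)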
    this->emitSamplers(fp, &samplers, ifp);

    GrGLSLFragmentProcessor::EmitArgs args(this,
                                           &fFS,
                                           this->glslCaps(),
                                           fp,
                                           outColor,
                                           inColor,
                                           fOutCoords[index],
                                           samplers);
    ifp->fGLProc->emitCode(args);

    // We have to check that effects and the code they emit are consistent, i.e. if an effect
    // asks for dst color, then the emit code needs to follow suit
    verify(fp);
    fFragmentProcessors->fProcs.push_back(ifp);
}
Example #2
void GrGLProgramBuilder::emitAndInstallProc(const GrPrimitiveProcessor& gp,
                                            const char* outColor,
                                            const char* outCoverage) {
    SkASSERT(!fGeometryProcessor);
    fGeometryProcessor = new GrGLInstalledGeoProc;

    fGeometryProcessor->fGLProc.reset(gp.createGLSLInstance(*fGpu->glCaps().glslCaps()));

    SkSTArray<4, GrGLSLTextureSampler> samplers(gp.numTextures());
    this->emitSamplers(gp, &samplers, fGeometryProcessor);

    GrGLSLGeometryProcessor::EmitArgs args(this,
                                           &fVS,
                                           &fFS,
                                           &fVaryingHandler,
                                           this->glslCaps(),
                                           gp,
                                           outColor,
                                           outCoverage,
                                           samplers,
                                           fCoordTransforms,
                                           &fOutCoords);
    fGeometryProcessor->fGLProc->emitCode(args);

    // We have to check that effects and the code they emit are consistent, i.e. if an effect
    // asks for dst color, then the emit code needs to follow suit
    verify(gp);
}
Example #3
void GrGLPathTexGenProgramEffects::emitEffect(GrGLFragmentOnlyShaderBuilder* builder,
                                          const GrEffectStage& stage,
                                          const GrEffectKey& key,
                                          const char* outColor,
                                          const char* inColor,
                                          int stageIndex) {
    GrDrawEffect drawEffect(stage, false);
    const GrEffect* effect = stage.getEffect();
    SkSTArray<2, TransformedCoords> coords(effect->numTransforms());
    SkSTArray<4, TextureSampler> samplers(effect->numTextures());

    SkASSERT(0 == stage.getVertexAttribIndexCount());
    this->setupPathTexGen(builder, drawEffect, &coords);
    this->emitSamplers(builder, effect, &samplers);

    GrGLEffect* glEffect = effect->getFactory().createGLInstance(drawEffect);
    fGLEffects.push_back(glEffect);

    // Enclose custom code in a block to avoid namespace conflicts
    SkString openBrace;
    openBrace.printf("\t{ // Stage %d: %s\n", stageIndex, glEffect->name());
    builder->fsCodeAppend(openBrace.c_str());

    SkASSERT(!glEffect->isVertexEffect());
    glEffect->emitCode(builder, drawEffect, key, outColor, inColor, coords, samplers);

    builder->fsCodeAppend("\t}\n");
}
Example #4
void GrGLVertexProgramEffects::emitEffect(GrGLFullShaderBuilder* builder,
                                          const GrEffectStage& stage,
                                          const GrEffectKey& key,
                                          const char* outColor,
                                          const char* inColor,
                                          int stageIndex) {
    GrDrawEffect drawEffect(stage, fHasExplicitLocalCoords);
    const GrEffect* effect = stage.getEffect();
    SkSTArray<2, TransformedCoords> coords(effect->numTransforms());
    SkSTArray<4, TextureSampler> samplers(effect->numTextures());

    this->emitAttributes(builder, stage);
    this->emitTransforms(builder, drawEffect, &coords);
    this->emitSamplers(builder, effect, &samplers);

    GrGLEffect* glEffect = effect->getFactory().createGLInstance(drawEffect);
    fGLEffects.push_back(glEffect);

    // Enclose custom code in a block to avoid namespace conflicts
    SkString openBrace;
    openBrace.printf("\t{ // Stage %d: %s\n", stageIndex, glEffect->name());
    builder->vsCodeAppend(openBrace.c_str());
    builder->fsCodeAppend(openBrace.c_str());

    if (glEffect->isVertexEffect()) {
        GrGLVertexEffect* vertexEffect = static_cast<GrGLVertexEffect*>(glEffect);
        vertexEffect->emitCode(builder, drawEffect, key, outColor, inColor, coords, samplers);
    } else {
        glEffect->emitCode(builder, drawEffect, key, outColor, inColor, coords, samplers);
    }

    builder->vsCodeAppend("\t}\n");
    builder->fsCodeAppend("\t}\n");
}
Example #5
MTS_NAMESPACE_BEGIN

RenderJob::RenderJob(const std::string &threadName,
	Scene *scene, RenderQueue *queue, int sceneResID, int sensorResID,
	int samplerResID, bool threadIsCritical, bool interactive)
	: Thread(threadName), m_scene(scene), m_queue(queue), m_interactive(interactive) {

	/* Optional: bring the process down when this thread crashes */
	setCritical(threadIsCritical);

	m_queue->addJob(this);
	ref<Scheduler> sched = Scheduler::getInstance();

	ref<Sensor> sensor = m_scene->getSensor();
	ref<Sampler> sampler = m_scene->getSampler();

	/* Register the scene with the scheduler if needed */
	if (sceneResID == -1) {
		m_sceneResID = sched->registerResource(m_scene);
		m_ownsSceneResource = true;
	} else {
		m_sceneResID = sceneResID;
		m_ownsSceneResource = false;
	}

	/* Register the sensor with the scheduler if needed */
	if (sensorResID == -1) {
		m_sensorResID = sched->registerResource(sensor);
		m_ownsSensorResource = true;
	} else {
		m_sensorResID = sensorResID;
		m_ownsSensorResource = false;
	}

	/* Register the sampler with the scheduler if needed */
	if (samplerResID == -1) {
		/* Create a sampler instance for every core */
		std::vector<SerializableObject *> samplers(sched->getCoreCount());
		for (size_t i=0; i<sched->getCoreCount(); ++i) {
			ref<Sampler> clonedSampler = sampler->clone();
			clonedSampler->incRef();
			samplers[i] = clonedSampler.get();
		}
		m_samplerResID = sched->registerMultiResource(samplers);
		for (size_t i=0; i<sched->getCoreCount(); ++i)
			samplers[i]->decRef();
		m_ownsSamplerResource = true;
	} else {
		m_samplerResID = samplerResID;
		m_ownsSamplerResource = false;
	}
	m_cancelled = false;
}
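Example #5 above and most of the Mitsuba snippets further down repeat the same per-core sampler idiom: clone one prototype sampler for every worker core, pin each clone with incRef() while its raw pointer sits in the vector, register the vector with the scheduler, and drop the manual references once the scheduler holds its own. A minimal sketch of just this step, assuming the Mitsuba 0.x Scheduler/Sampler API exactly as it appears in these examples (the include paths follow the usual Mitsuba layout, and the helper name registerPerCoreSamplers is made up for illustration):

#include <vector>

#include <mitsuba/core/sched.h>
#include <mitsuba/render/sampler.h>

using namespace mitsuba;

static int registerPerCoreSamplers(Sampler *prototype) {
	ref<Scheduler> sched = Scheduler::getInstance();

	/* One sampler clone per worker core */
	std::vector<SerializableObject *> samplers(sched->getCoreCount());
	for (size_t i=0; i<sched->getCoreCount(); ++i) {
		ref<Sampler> clonedSampler = prototype->clone();
		clonedSampler->incRef();   /* keep the clone alive after the local ref<> goes away */
		samplers[i] = clonedSampler.get();
	}

	/* The scheduler takes its own reference to every entry ... */
	int samplerResID = sched->registerMultiResource(samplers);

	/* ... so the manual references acquired above can be released again */
	for (size_t i=0; i<samplers.size(); ++i)
		samplers[i]->decRef();

	return samplerResID;
}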
Example #6
void GrGLProgramBuilder::emitAndInstallProc(const GrPendingFragmentStage& fs,
                                            int index,
                                            const char* outColor,
                                            const char* inColor) {
    GrGLInstalledFragProc* ifp = SkNEW(GrGLInstalledFragProc);

    const GrFragmentProcessor& fp = *fs.processor();
    ifp->fGLProc.reset(fp.createGLInstance());

    SkSTArray<4, GrGLProcessor::TextureSampler> samplers(fp.numTextures());
    this->emitSamplers(fp, &samplers, ifp);

    ifp->fGLProc->emitCode(this, fp, outColor, inColor, fOutCoords[index], samplers);

    // We have to check that effects and the code they emit are consistent, i.e. if an effect
    // asks for dst color, then the emit code needs to follow suit
    verify(fp);
    fFragmentProcessors->fProcs.push_back(ifp);
}
Example #7
void GrGLProgramBuilder::emitAndInstallXferProc(const GrXferProcessor& xp,
                                                const GrGLSLExpr4& colorIn,
                                                const GrGLSLExpr4& coverageIn) {
    // Program builders have a bit of state we need to clear with each effect
    AutoStageAdvance adv(this);

    SkASSERT(!fXferProcessor);
    fXferProcessor = new GrGLInstalledXferProc;

    fXferProcessor->fGLProc.reset(xp.createGLSLInstance());

    // Enable dual source secondary output if we have one
    if (xp.hasSecondaryOutput()) {
        fFS.enableSecondaryOutput();
    }

    if (this->glslCaps()->mustDeclareFragmentShaderOutput()) {
        fFS.enableCustomOutput();
    }

    SkString openBrace;
    openBrace.printf("{ // Xfer Processor: %s\n", xp.name());
    fFS.codeAppend(openBrace.c_str());

    SkSTArray<4, GrGLSLTextureSampler> samplers(xp.numTextures());
    this->emitSamplers(xp, &samplers, fXferProcessor);

    GrGLSLXferProcessor::EmitArgs args(this,
                                       &fFS,
                                       this->glslCaps(),
                                       xp, colorIn.c_str(),
                                       coverageIn.c_str(),
                                       fFS.getPrimaryColorOutputName(),
                                       fFS.getSecondaryColorOutputName(),
                                       samplers);
    fXferProcessor->fGLProc->emitCode(args);

    // We have to check that effects and the code they emit are consistent, i.e. if an effect
    // asks for dst color, then the emit code needs to follow suit
    verify(xp);
    fFS.codeAppend("}");
}
Example #8
ref<Bitmap> BidirectionalUtils::mltLuminancePass(Scene *scene, int sceneResID,
		RenderQueue *queue, int sizeFactor, ref<RenderJob> &nestedJob) {
	ref<PluginManager> pluginMgr = PluginManager::getInstance();
	ref<Scheduler> scheduler = Scheduler::getInstance();
	Properties integratorProps = scene->getIntegrator()->getProperties();

	Vector2i origCropSize   = scene->getFilm()->getCropSize();
	Vector2i origSize       = scene->getFilm()->getSize();

	Vector2i reducedSize = Vector2i(
		std::max(1, origSize.x / sizeFactor),
		std::max(1, origSize.y / sizeFactor));

	Vector2i reducedCropSize = Vector2i(
		std::max(1, origCropSize.x / sizeFactor),
		std::max(1, origCropSize.y / sizeFactor));

	Point2i reducedCropOffset =
		scene->getFilm()->getCropOffset()/sizeFactor;

	size_t sampleCount = scene->getSampler()->getSampleCount();
	const Sensor *sensor = scene->getSensor();

	Properties filmProps("hdrfilm");
	filmProps.setInteger("width", reducedSize.x, false);
	filmProps.setInteger("height", reducedSize.y, false);
	filmProps.setInteger("cropWidth", reducedCropSize.x, false);
	filmProps.setInteger("cropHeight", reducedCropSize.y, false);
	filmProps.setInteger("cropOffsetX", reducedCropOffset.x, false);
	filmProps.setInteger("cropOffsetY", reducedCropOffset.x, false);
	ref<Film> nestedFilm = static_cast<Film *>(
		pluginMgr->createObject(Film::m_theClass, filmProps));
	nestedFilm->configure();

	/* Use a higher number of mutations/pixel compared to the second stage */
	Properties samplerProps("independent");
	samplerProps.setSize("sampleCount", sampleCount * sizeFactor);
	ref<Sampler> nestedSampler = static_cast<Sampler *>(
		pluginMgr->createObject(Sampler::m_theClass, samplerProps));
	nestedSampler->configure();
	std::vector<SerializableObject *> samplers(scheduler->getCoreCount());
	for (size_t i=0; i<scheduler->getCoreCount(); ++i) {
		ref<Sampler> clonedSampler = nestedSampler->clone();
		clonedSampler->incRef();
		samplers[i] = clonedSampler.get();
	}
	int nestedSamplerResID = scheduler->registerMultiResource(samplers);
	for (size_t i=0; i<scheduler->getCoreCount(); ++i)
		samplers[i]->decRef();

	/* Configure the sensor */
	Properties sensorProps = sensor->getProperties();
	ref<Sensor> nestedSensor = static_cast<Sensor *>
		(pluginMgr->createObject(Sensor::m_theClass, sensorProps));
	nestedSensor->addChild(nestedSampler);
	nestedSensor->addChild(nestedFilm);
	nestedSensor->configure();
	int nestedSensorResID = scheduler->registerResource(nestedSensor);

	integratorProps.setBoolean("firstStage", true, false);
	ref<Integrator> nestedIntegrator = static_cast<Integrator *> (pluginMgr->
			createObject(Integrator::m_theClass, integratorProps));

	ref<Scene> nestedScene = new Scene(scene);
	nestedScene->setSensor(nestedSensor);
	nestedScene->setIntegrator(nestedIntegrator);
	nestedScene->configure();
	nestedScene->initialize();

	nestedJob = new RenderJob("mlti", nestedScene, queue,
		sceneResID, nestedSensorResID, nestedSamplerResID);

	nestedJob->start();
	if (!nestedJob->wait()) {
		nestedJob = NULL;
		scheduler->unregisterResource(nestedSensorResID);
		scheduler->unregisterResource(nestedSamplerResID);
		return NULL;
	}
	nestedJob = NULL;

	scheduler->unregisterResource(nestedSensorResID);
	scheduler->unregisterResource(nestedSamplerResID);

	/* Instantiate a Gaussian reconstruction filter */
	ref<ReconstructionFilter> rfilter = static_cast<ReconstructionFilter *> (
		PluginManager::getInstance()->createObject(
		MTS_CLASS(ReconstructionFilter), Properties("gaussian")));
	rfilter->configure();

	/* Develop the rendered image into a luminance bitmap */
	ref<Bitmap> luminanceMap = new Bitmap(Bitmap::ELuminance,
		Bitmap::EFloat, reducedCropSize);
	nestedFilm->develop(Point2i(0, 0), reducedCropSize,
		Point2i(0, 0), luminanceMap);

	/* Up-sample the low resolution luminance map */
	luminanceMap = luminanceMap->resample(rfilter,
		ReconstructionFilter::EClamp,
		ReconstructionFilter::EClamp, origCropSize,
		0.0f, std::numeric_limits<Float>::infinity());

	return luminanceMap;
}
Example #9
MTS_NAMESPACE_BEGIN

ref<Bitmap> BidirectionalUtils::renderDirectComponent(Scene *scene, int sceneResID,
		int sensorResID, RenderQueue *queue, const RenderJob *job, size_t directSamples) {
	ref<PluginManager> pluginMgr = PluginManager::getInstance();
	ref<Scheduler> scheduler = Scheduler::getInstance();
	const Film *film = scene->getFilm();
	Integrator *integrator = scene->getIntegrator();
	/* Render the direct illumination component separately */
	ref<Bitmap> directImage = new Bitmap(Bitmap::ERGBA, Bitmap::EFloat32, film->getCropSize());
	bool hasMedia = scene->getMedia().size() > 0;
	size_t pixelSamples = directSamples;
	Properties integratorProps(hasMedia ? "volpath" : "direct");

	if (hasMedia) {
		/* Render with a volumetric path tracer */
		integratorProps.setInteger("maxDepth", 2);
	} else {
		/* No participating media -> we can use the 'direct' plugin, which has
		   somewhat better control over where to place shading/pixel samples */
		int shadingSamples = 1;
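		/* Trade pixel samples for shading samples while keeping their product
		   close to 'directSamples'; stop once at most 8 samples per pixel remain */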
		while (pixelSamples > 8) {
			pixelSamples /= 2;
			shadingSamples *= 2;
		}
		integratorProps.setSize("shadingSamples", shadingSamples);
	}

	ref<Integrator> directIntegrator = static_cast<Integrator *> (pluginMgr->
			createObject(Integrator::m_theClass, integratorProps));
	/* Create a low discrepancy sampler instance for every core */
	Properties samplerProps("ldsampler");
	samplerProps.setSize("sampleCount", pixelSamples);
	ref<Sampler> ldSampler = static_cast<Sampler *> (pluginMgr->
			createObject(Sampler::m_theClass, samplerProps));
	ldSampler->configure();
	directIntegrator->configure();
	directIntegrator->configureSampler(scene, ldSampler);
	std::vector<SerializableObject *> samplers(scheduler->getCoreCount());
	for (size_t i=0; i<scheduler->getCoreCount(); ++i) {
		ref<Sampler> clonedSampler = ldSampler->clone();
		clonedSampler->incRef();
		samplers[i] = clonedSampler.get();
	}
	int ldSamplerResID = scheduler->registerMultiResource(samplers);
	for (size_t i=0; i<scheduler->getCoreCount(); ++i)
		samplers[i]->decRef();

	integrator->incRef();
	scene->setIntegrator(directIntegrator);
	bool success = directIntegrator->render(scene, queue, job,
		sceneResID, sensorResID, ldSamplerResID);
	scene->setIntegrator(integrator);
	integrator->decRef();
	scheduler->unregisterResource(ldSamplerResID);

	if (success) {
		ref<Bitmap> bitmap = new Bitmap(
			Bitmap::ESpectrum, Bitmap::EFloat,
			film->getCropSize());
		film->develop(Point2i(0, 0),
			film->getCropSize(), Point2i(0, 0), bitmap);
		return bitmap;
	} else {
		return NULL;
	}
}
Example #10
	bool render(Scene *scene, RenderQueue *queue, 
		const RenderJob *job, int sceneResID, int cameraResID, int unused) {
		ref<Scheduler> sched = Scheduler::getInstance();
		ref<Camera> camera = scene->getCamera();
		ref<Film> film = camera->getFilm();
		size_t nCores = sched->getCoreCount();
		Log(EInfo, "Starting render job (%ix%i, " SIZE_T_FMT " %s, " SSE_STR ") ..", 
			film->getCropSize().x, film->getCropSize().y, 
			nCores, nCores == 1 ? "core" : "cores");

		Vector2i cropSize = film->getCropSize();
		Point2i cropOffset = film->getCropOffset();

		m_gatherBlocks.clear();
		m_running = true;
		m_totalEmitted = 0;

		ref<Sampler> sampler = static_cast<Sampler *> (PluginManager::getInstance()->
			createObject(MTS_CLASS(Sampler), Properties("independent")));

		/* Allocate memory */
		m_bitmap = new Bitmap(film->getSize().x, film->getSize().y, 128);
		m_bitmap->clear();
		for (int yofs=0; yofs<cropSize.y; yofs += m_blockSize) {
			for (int xofs=0; xofs<cropSize.x; xofs += m_blockSize) {
				m_gatherBlocks.push_back(std::vector<GatherPoint>());
				m_offset.push_back(Point2i(cropOffset.x + xofs, cropOffset.y + yofs));
				std::vector<GatherPoint> &gatherPoints = m_gatherBlocks[m_gatherBlocks.size()-1];
				int nPixels = std::min(m_blockSize, cropSize.y-yofs)
							* std::min(m_blockSize, cropSize.x-xofs);
				gatherPoints.resize(nPixels);
				for (int i=0; i<nPixels; ++i)
					gatherPoints[i].radius = m_initialRadius;
			}
		}

		/* Create a sampler instance for every core */
		std::vector<SerializableObject *> samplers(sched->getCoreCount());
		for (size_t i=0; i<sched->getCoreCount(); ++i) {
			ref<Sampler> clonedSampler = sampler->clone();
			clonedSampler->incRef();
			samplers[i] = clonedSampler.get();
		}

		int samplerResID = sched->registerManifoldResource(
			static_cast<std::vector<SerializableObject*> &>(samplers)); 

#ifdef MTS_DEBUG_FP
		enableFPExceptions();
#endif

		int it=0;
		while (m_running) { 
			distributedRTPass(scene, samplers);
			photonMapPass(++it, queue, job, film, sceneResID, 
					cameraResID, samplerResID);
		}

#ifdef MTS_DEBUG_FP
		disableFPExceptions();
#endif

		for (size_t i=0; i<sched->getCoreCount(); ++i)
			samplers[i]->decRef();
		sched->unregisterResource(samplerResID);
		return true;
	}
Example #11
	bool preprocess(const Scene *scene, RenderQueue *queue, const RenderJob *job,
			int sceneResID, int sensorResID, int samplerResID) {
		SamplingIntegrator::preprocess(scene, queue, job, sceneResID, sensorResID, samplerResID);
		/* Create a deterministic sampler for the photon gathering step */
		ref<Scheduler> sched = Scheduler::getInstance();
		ref<Sampler> sampler = static_cast<Sampler *> (PluginManager::getInstance()->
			createObject(MTS_CLASS(Sampler), Properties("halton")));
		/* Create a sampler instance for every core */
		std::vector<SerializableObject *> samplers(sched->getCoreCount());
		for (size_t i=0; i<sched->getCoreCount(); ++i) {
			ref<Sampler> clonedSampler = sampler->clone();
			clonedSampler->incRef();
			samplers[i] = clonedSampler.get();
		}
		int qmcSamplerID = sched->registerMultiResource(samplers);
		for (size_t i=0; i<samplers.size(); ++i)
			samplers[i]->decRef();

		const ref_vector<Medium> &media = scene->getMedia();
		for (ref_vector<Medium>::const_iterator it = media.begin(); it != media.end(); ++it) {
			if (!(*it)->isHomogeneous())
				Log(EError, "Inhomogeneous media are currently not supported by the photon mapper!");
		}

		if (m_globalPhotonMap.get() == NULL && m_globalPhotons > 0) {
			/* Generate the global photon map */
			ref<GatherPhotonProcess> proc = new GatherPhotonProcess(
				GatherPhotonProcess::ESurfacePhotons, m_globalPhotons,
				m_granularity, m_maxDepth-1, m_rrDepth, m_gatherLocally,
				m_autoCancelGathering, job);

			proc->bindResource("scene", sceneResID);
			proc->bindResource("sensor", sensorResID);
			proc->bindResource("sampler", qmcSamplerID);

			m_proc = proc;
			sched->schedule(proc);
			sched->wait(proc);
			m_proc = NULL;

			if (proc->getReturnStatus() != ParallelProcess::ESuccess)
				return false;

			ref<PhotonMap> globalPhotonMap = proc->getPhotonMap();
			if (globalPhotonMap->isFull()) {
				Log(EDebug, "Global photon map full. Shot " SIZE_T_FMT " particles, excess photons due to parallelism: "
					SIZE_T_FMT, proc->getShotParticles(), proc->getExcessPhotons());

				m_globalPhotonMap = globalPhotonMap;
				m_globalPhotonMap->setScaleFactor(1 / (Float) proc->getShotParticles());
				m_globalPhotonMap->build();
				m_globalPhotonMapID = sched->registerResource(m_globalPhotonMap);
			}
		}

		if (m_causticPhotonMap.get() == NULL && m_causticPhotons > 0) {
			/* Generate the caustic photon map */
			ref<GatherPhotonProcess> proc = new GatherPhotonProcess(
				GatherPhotonProcess::ECausticPhotons, m_causticPhotons,
				m_granularity, m_maxDepth-1, m_rrDepth, m_gatherLocally,
				m_autoCancelGathering, job);

			proc->bindResource("scene", sceneResID);
			proc->bindResource("sensor", sensorResID);
			proc->bindResource("sampler", qmcSamplerID);

			m_proc = proc;
			sched->schedule(proc);
			sched->wait(proc);
			m_proc = NULL;

			if (proc->getReturnStatus() != ParallelProcess::ESuccess)
				return false;

			ref<PhotonMap> causticPhotonMap = proc->getPhotonMap();
			if (causticPhotonMap->isFull()) {
				Log(EDebug, "Caustic photon map full. Shot " SIZE_T_FMT " particles, excess photons due to parallelism: "
					SIZE_T_FMT, proc->getShotParticles(), proc->getExcessPhotons());

				m_causticPhotonMap = causticPhotonMap;
				m_causticPhotonMap->setScaleFactor(1 / (Float) proc->getShotParticles());
				m_causticPhotonMap->build();
				m_causticPhotonMapID = sched->registerResource(m_causticPhotonMap);
			}
		}

		size_t volumePhotons = scene->getMedia().size() == 0 ? 0 : m_volumePhotons;
		if (m_volumePhotonMap.get() == NULL && volumePhotons > 0) {
			/* Generate the volume photon map */
			ref<GatherPhotonProcess> proc = new GatherPhotonProcess(
				GatherPhotonProcess::EVolumePhotons, volumePhotons,
				m_granularity, m_maxDepth-1, m_rrDepth, m_gatherLocally,
				m_autoCancelGathering, job);

			proc->bindResource("scene", sceneResID);
			proc->bindResource("sensor", sensorResID);
			proc->bindResource("sampler", qmcSamplerID);

			m_proc = proc;
			sched->schedule(proc);
			sched->wait(proc);
			m_proc = NULL;

			if (proc->getReturnStatus() != ParallelProcess::ESuccess)
				return false;

			ref<PhotonMap> volumePhotonMap = proc->getPhotonMap();
			if (volumePhotonMap->isFull()) {
				Log(EDebug, "Volume photon map full. Shot " SIZE_T_FMT " particles, excess photons due to parallelism: "
					SIZE_T_FMT, proc->getShotParticles(), proc->getExcessPhotons());

				volumePhotonMap->setScaleFactor(1 / (Float) proc->getShotParticles());
				volumePhotonMap->build();
				m_bre = new BeamRadianceEstimator(volumePhotonMap, m_volumeLookupSize);
				m_breID = sched->registerResource(m_bre);
			}
		}

		/* Adapt to scene extents */
		m_globalLookupRadius = m_globalLookupRadiusRel * scene->getBSphere().radius;
		m_causticLookupRadius = m_causticLookupRadiusRel * scene->getBSphere().radius;

		sched->unregisterResource(qmcSamplerID);

		return true;
	}
Example #12
	bool render(Scene *scene, RenderQueue *queue, 
		const RenderJob *job, int sceneResID, int cameraResID, int samplerResID) {
		ref<Scheduler> sched = Scheduler::getInstance();
		ref<Camera> camera = scene->getCamera();
		ref<Film> film = camera->getFilm();
		size_t nCores = sched->getCoreCount();
		Sampler *cameraSampler = (Sampler *) sched->getResource(samplerResID, 0);
	
		size_t sampleCount = cameraSampler->getSampleCount();
		Log(EInfo, "Starting render job (%ix%i, " SIZE_T_FMT " %s, " SIZE_T_FMT 
			" %s, " SSE_STR ") ..", film->getCropSize().x, film->getCropSize().y, 
			sampleCount, sampleCount == 1 ? "sample" : "samples", nCores, 
			nCores == 1 ? "core" : "cores");

		Vector2i cropSize = film->getCropSize();
		Point2i cropOffset = film->getCropOffset();

		m_gatherPoints.clear();
		m_running = true;
		for (size_t i=0; i<m_blocks.size(); ++i)
			m_blocks[i]->decRef();
		m_blocks.clear();

		m_totalEmitted = 0;
		bool needsLensSample = camera->needsLensSample();
		bool needsTimeSample = camera->needsTimeSample();
		Log(EInfo, "Creating approximately %i gather points", cropSize.x*cropSize.y*sampleCount);
		Point2 lensSample, sample;
		RayDifferential eyeRay;
		Float timeSample = 0;
		m_filter = camera->getFilm()->getTabulatedFilter();
		Vector2 filterSize = m_filter->getFilterSize();
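		/* Image blocks need a border as wide as the reconstruction filter so that
		   samples splatted near a block edge are not clipped */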
		int borderSize = (int) std::ceil(std::max(filterSize.x, filterSize.y));

		ref<Sampler> independentSampler = static_cast<Sampler *> (PluginManager::getInstance()->
			createObject(MTS_CLASS(Sampler), Properties("independent")));

		/* Create a sampler instance for every core */
		std::vector<SerializableObject *> samplers(sched->getCoreCount());
		for (size_t i=0; i<sched->getCoreCount(); ++i) {
			ref<Sampler> clonedSampler = independentSampler->clone();
			clonedSampler->incRef();
			samplers[i] = clonedSampler.get();
		}

		int independentSamplerResID = sched->registerManifoldResource(samplers); 
		for (size_t i=0; i<sched->getCoreCount(); ++i)
			samplers[i]->decRef();

#ifdef MTS_DEBUG_FP
		enableFPExceptions();
#endif

		/* Create gather points in blocks so that gathering can be parallelized later on */
		for (int yofs=0; yofs<cropSize.y; yofs += m_blockSize) {
			for (int xofs=0; xofs<cropSize.x; xofs += m_blockSize) {
				ImageBlock *block = new ImageBlock(Vector2i(m_blockSize, m_blockSize), borderSize, 
					true, true, false, false);
				block->setSize(Vector2i(m_blockSize, m_blockSize));
				block->setOffset(Point2i(cropOffset.x + xofs, cropOffset.y + yofs));
				block->incRef();
				std::vector<GatherPoint> gatherPoints;
				gatherPoints.reserve(m_blockSize*m_blockSize*sampleCount);
				for (int yofsInt = 0; yofsInt < m_blockSize; ++yofsInt) {
					if (yofsInt + yofs >= cropSize.y)
						continue;
					for (int xofsInt = 0; xofsInt < m_blockSize; ++xofsInt) {
						if (xofsInt + xofs >= cropSize.x)
							continue;
						int y = cropOffset.y + yofs + yofsInt;
						int x = cropOffset.x + xofs + xofsInt;
						cameraSampler->generate();
						for (size_t j = 0; j<sampleCount; j++) {
							if (needsLensSample)
								lensSample = cameraSampler->next2D();
							if (needsTimeSample)
								timeSample = cameraSampler->next1D();
							sample = cameraSampler->next2D();
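							/* next2D() returns a position in [0,1)^2; shift it into
							   absolute image coordinates for this pixel */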
							sample.x += x; sample.y += y;
							camera->generateRayDifferential(sample, 
								lensSample, timeSample, eyeRay);
							size_t offset = gatherPoints.size();
							Float count = (Float) createGatherPoints(scene, eyeRay, sample,
								cameraSampler, Spectrum(1.0f), gatherPoints, 1);
							if (count > 1) { // necessary because of filter weight computation
								for (int i = 0; i<count; ++i)
									gatherPoints[offset+i].weight *= count;
							}

							cameraSampler->advance();
						}
					}
				}
				m_blocks.push_back(block);
				m_gatherPoints.push_back(gatherPoints);
			}
		}

		int it=0;
		while (m_running) 
			photonMapPass(++it, queue, job, film, sceneResID, cameraResID, independentSamplerResID);

#ifdef MTS_DEBUG_FP
		disableFPExceptions();
#endif

		sched->unregisterResource(independentSamplerResID);
		return true;
	}