// Evaluates the radiance estimate for one MLT sample: generates a camera
// subpath from the sample's stored camera-path samples and either evaluates
// it with unidirectional path tracing or, in bidirectional mode, also
// generates a light subpath and connects the two.
// Note: all randomness comes from the MLTSample (so a mutation perturbs the
// path deterministically); `rng` is only passed through to the L* evaluators.
Spectrum MetropolisRenderer::PathL(const MLTSample &sample, const Scene *scene,
        MemoryArena &arena, const Camera *camera,
        const Distribution1D *lightDistribution, PathVertex *cameraPath,
        PathVertex *lightPath, RNG &rng) const {
    // Generate camera path from camera path samples
    PBRT_STARTED_GENERATING_CAMERA_RAY((CameraSample *)(&sample.cameraSample));
    RayDifferential cameraRay;
    float cameraWt = camera->GenerateRayDifferential(sample.cameraSample,
        &cameraRay);
    // Shrink the ray differentials to match the per-pixel sample density.
    cameraRay.ScaleDifferentials(1.f / sqrtf(nPixelSamples));
    PBRT_FINISHED_GENERATING_CAMERA_RAY((CameraSample *)(&sample.cameraSample),
        &cameraRay, cameraWt);
    // _escapedRay_/_escapedAlpha_ record a ray that left the scene so that
    // infinite-area light emission can be added by the evaluators below.
    RayDifferential escapedRay;
    Spectrum escapedAlpha;
    uint32_t cameraLength = GeneratePath(cameraRay, cameraWt, scene, arena,
        sample.cameraPathSamples, cameraPath, &escapedRay, &escapedAlpha);
    if (!bidirectional) {
        // Compute radiance along path using path tracing
        return Lpath(scene, cameraPath, cameraLength, arena,
            sample.lightingSamples, rng, sample.cameraSample.time,
            lightDistribution, escapedRay, escapedAlpha);
    }
    else {
        // Sample light ray and apply bidirectional path tracing

        // Choose light and sample ray to start light path
        PBRT_MLT_STARTED_SAMPLE_LIGHT_FOR_BIDIR();
        float lightPdf, lightRayPdf;
        uint32_t lightNum = lightDistribution->SampleDiscrete(
            sample.lightNumSample, &lightPdf);
        const Light *light = scene->lights[lightNum];
        Ray lightRay;
        Normal Nl;
        LightSample lrs(sample.lightRaySamples[0], sample.lightRaySamples[1],
            sample.lightRaySamples[2]);
        Spectrum lightWt = light->Sample_L(scene, lrs,
            sample.lightRaySamples[3], sample.lightRaySamples[4],
            sample.cameraSample.time, &lightRay, &Nl, &lightRayPdf);
        PBRT_MLT_FINISHED_SAMPLE_LIGHT_FOR_BIDIR();
        if (lightWt.IsBlack() || lightRayPdf == 0.f) {
            // Light sampling failed (black emission or zero pdf):
            // compute radiance along path using path tracing only.
            return Lpath(scene, cameraPath, cameraLength, arena,
                sample.lightingSamples, rng, sample.cameraSample.time,
                lightDistribution, escapedRay, escapedAlpha);
        }
        else {
            // Compute radiance along paths using bidirectional path tracing.
            // Weight the light path start by cosine / (light-choice pdf *
            // ray pdf), the standard light-subpath initialization.
            lightWt *= AbsDot(Normalize(Nl), lightRay.d) /
                (lightPdf * lightRayPdf);
            uint32_t lightLength = GeneratePath(RayDifferential(lightRay),
                lightWt, scene, arena, sample.lightPathSamples, lightPath,
                NULL, NULL);
            return Lbidir(scene, cameraPath, cameraLength, lightPath,
                lightLength, arena, sample.lightingSamples, rng,
                sample.cameraSample.time, lightDistribution, escapedRay,
                escapedAlpha);
        }
    }
}
void SamplingIntegrator::renderBlock(const Scene *scene, const Sensor *sensor, Sampler *sampler, ImageBlock *block, const bool &stop, const std::vector< TPoint2<uint8_t> > &points) const { Float diffScaleFactor = 1.0f / std::sqrt((Float) sampler->getSampleCount()); bool needsApertureSample = sensor->needsApertureSample(); bool needsTimeSample = sensor->needsTimeSample(); RadianceQueryRecord rRec(scene, sampler); Point2 apertureSample(0.5f); Float timeSample = 0.5f; RayDifferential sensorRay; block->clear(); uint32_t queryType = RadianceQueryRecord::ESensorRay; if (!sensor->getFilm()->hasAlpha()) /* Don't compute an alpha channel if we don't have to */ queryType &= ~RadianceQueryRecord::EOpacity; for (size_t i = 0; i<points.size(); ++i) { Point2i offset = Point2i(points[i]) + Vector2i(block->getOffset()); if (stop) break; sampler->generate(offset); for (size_t j = 0; j<sampler->getSampleCount(); j++) { rRec.newQuery(queryType, sensor->getMedium()); Point2 samplePos(Point2(offset) + Vector2(rRec.nextSample2D())); if (needsApertureSample) apertureSample = rRec.nextSample2D(); if (needsTimeSample) timeSample = rRec.nextSample1D(); Spectrum spec = sensor->sampleRayDifferential( sensorRay, samplePos, apertureSample, timeSample); sensorRay.scaleDifferential(diffScaleFactor); spec *= Li(sensorRay, rRec); block->put(samplePos, spec, rRec.alpha); sampler->advance(); } } }
void BlockedRenderer::RenderBlock(int taskId) { const int tileY = taskId / m_TilesNumX; const int tileX = taskId - tileY * m_TilesNumX; const int x0 = tileX * m_BlockSize; const int x1 = std::min(x0 + m_BlockSize, m_XRes); const int y0 = tileY * m_BlockSize; const int y1 = std::min(y0 + m_BlockSize, m_YRes); std::shared_ptr<Sampler> localSampler = m_Sampler->Clone(); for(int y = y0; y < y1; y++) { for(int x = x0; x < x1; x++) { for(int s = 0; s < m_Spp; s++) { RayDifferential rayDiff; Vector2 offset = localSampler->Next2D(); Ray ray = m_Camera->GenerateRay(Point2(x, y) + offset, &rayDiff); rayDiff.Scale(1.f/std::sqrt(m_Spp)); Intersection isect; RGBSpectrum L(0.f); if(m_Scene->Intersect(ray, &isect, &rayDiff)) { for(auto light = m_Scene->GetLightIteratorBegin(); light != m_Scene->GetLightIteratorEnd(); light++) { Vector wo = -ray.dir; float lightPdf = 0.f; Point2 lightSample = localSampler->Next2D(); Vector wi; RGBSpectrum lightContrib = LightSampling(light, lightSample, isect, wo, &wi, &lightPdf); float lightWeight = lightPdf / (lightPdf + isect.bsdf->SamplePdf(isect, wo, wi)); L += lightWeight * lightContrib; float bsdfPdf = 0.f; Point2 bsdfSample = localSampler->Next2D(); RGBSpectrum bsdfContrib = BSDFSampling(light, bsdfSample, isect, wo, &wi, &bsdfPdf); float bsdfWeight = bsdfPdf / (bsdfPdf + (*light)->SampleDirectPdf(isect, wi)); L += bsdfWeight * bsdfContrib; } } m_Film->AddSample(x, y, L); } localSampler->NextSequence(); } } }
/* Samples a thin-lens camera ray (with x/y pixel differentials) in world
   space. `pixelSample` is the continuous film position, `otherSample` drives
   the aperture point, `timeSample` the shutter time. Always returns an
   importance weight of 1. */
Spectrum sampleRayDifferential(RayDifferential &ray, const Point2 &pixelSample,
        const Point2 &otherSample, Float timeSample) const {
    /* Record pixel index, added by Lifan */
    ray.index.x = (int)std::floor(pixelSample.x);
    ray.index.y = (int)std::floor(pixelSample.y);

    /* Map the 2D aperture sample to a point on the lens disk */
    Point2 tmp = warp::squareToUniformDiskConcentric(otherSample)
        * m_apertureRadius;
    ray.time = sampleTime(timeSample);

    /* Compute the corresponding position on the
       near plane (in local camera space) */
    Point nearP = m_sampleToCamera(Point(
        pixelSample.x * m_invResolution.x,
        pixelSample.y * m_invResolution.y, 0.0f));

    /* Aperture position */
    Point apertureP(tmp.x, tmp.y, 0.0f);

    /* Sampled position on the focal plane: scale the near-plane point (and
       its one-pixel x/y offsets) out to the plane of focus */
    Float fDist = m_focusDistance / nearP.z;
    Point focusP = nearP * fDist;
    Point focusPx = (nearP+m_dx) * fDist;
    Point focusPy = (nearP+m_dy) * fDist;

    /* Turn that into a normalized ray direction, and
       adjust the ray interval accordingly */
    Vector d = normalize(focusP - apertureP);
    Float invZ = 1.0f / d.z;
    /* Clip distances are measured along z, so rescale them to ray lengths */
    ray.mint = m_nearClip * invZ;
    ray.maxt = m_farClip * invZ;

    const Transform &trafo = m_worldTransform->eval(ray.time);
    ray.setOrigin(trafo.transformAffine(apertureP));
    ray.setDirection(trafo(d));
    /* Differential rays share the aperture point but aim at the offset
       focal-plane points */
    ray.rxOrigin = ray.ryOrigin = ray.o;
    ray.rxDirection = trafo(normalize(Vector(focusPx - apertureP)));
    ray.ryDirection = trafo(normalize(Vector(focusPy - apertureP)));
    ray.hasDifferentials = true;
    return Spectrum(1.0f);
}
int GenerateCameraSubpath(const Scene &scene, Sampler &sampler, MemoryArena &arena, int maxDepth, const Camera &camera, Point2f &pFilm, Vertex *path) { if (maxDepth == 0) return 0; // Sample initial ray for camera subpath CameraSample cameraSample; cameraSample.pFilm = pFilm; cameraSample.time = sampler.Get1D(); cameraSample.pLens = sampler.Get2D(); RayDifferential ray; Spectrum beta = camera.GenerateRayDifferential(cameraSample, &ray); ray.ScaleDifferentials(1 / std::sqrt(sampler.samplesPerPixel)); // Generate first vertex on camera subpath and start random walk Float pdfPos, pdfDir; path[0] = Vertex::CreateCamera(&camera, ray, beta); camera.Pdf_We(ray, &pdfPos, &pdfDir); return RandomWalk(scene, ray, sampler, arena, beta, pdfDir, maxDepth - 1, TransportMode::Radiance, path + 1) + 1; }
// Generates the camera subpath for bidirectional path tracing: samples a
// primary camera ray for `rasterPos`, stores the camera endpoint vertex in
// path[0], then continues with a random walk. Returns the total number of
// vertices created (walk length + 1), or 0 when maxdepth == 0.
// NOTE(review): this is an older variant of GenerateCameraSubpath also
// present in this file; it seeds the walk with camera.Pdf() on the endpoint
// interaction rather than Pdf_We() on the ray — confirm which one is live.
int GenerateCameraSubpath(const Scene &scene, Sampler &sampler,
        MemoryArena &arena, int maxdepth, const Camera &camera,
        Point2f &rasterPos, Vertex *path) {
    if (maxdepth == 0) return 0;
    // Sample initial ray for camera subpath
    CameraSample cameraSample;
    cameraSample.pFilm = rasterPos;
    cameraSample.time = sampler.Get1D();
    cameraSample.pLens = sampler.Get2D();
    RayDifferential ray;
    Spectrum rayWeight(camera.GenerateRayDifferential(cameraSample, &ray));
    // Scale differentials down to the per-pixel sample spacing.
    ray.ScaleDifferentials(1.f / std::sqrt(sampler.samplesPerPixel));
    // Generate first vertex on camera subpath and start random walk;
    // the camera vertex carries unit weight, rayWeight scales the walk.
    path[0] = Vertex(VertexType::Camera, EndpointInteraction(&camera, ray),
        Spectrum(1.0f));
    return RandomWalk(scene, ray, sampler, arena, rayWeight,
        camera.Pdf(path[0].ei, ray.d), maxdepth - 1,
        TransportMode::Radiance, path + 1) + 1;
}
/* Generates a world-space ray (with pixel differentials) for a parallel
   sensor: every ray leaves the z=0 plane of the local frame along the local
   +z axis. The aperture sample is unused; the importance weight is 1. */
Spectrum sampleRayDifferential(RayDifferential &ray, const Point2 &pixelSample,
        const Point2 &otherSample, Float timeSample) const {
    ray.time = sampleTime(timeSample);
    const Transform &toWorld = m_worldTransform->eval(ray.time);

    /* Map the pixel sample into local camera space and project it
       onto the z=0 plane */
    Point localP = m_sampleToCamera.transformAffine(Point(
        pixelSample.x * m_invResolution.x,
        pixelSample.y * m_invResolution.y, 0.0f));
    localP.z = 0.0f;

    ray.setOrigin(toWorld.transformAffine(localP));
    ray.setDirection(normalize(toWorld(Vector(0, 0, 1))));
    ray.mint = m_nearClip;
    ray.maxt = m_farClip;

    /* Differential rays: origins offset by one pixel in x and y,
       directions identical to the primary ray */
    ray.rxOrigin = toWorld(localP + m_dx);
    ray.ryOrigin = toWorld(localP + m_dy);
    ray.rxDirection = ray.d;
    ray.ryDirection = ray.d;
    ray.hasDifferentials = true;

    return Spectrum(1.0f);
}
// Computes the radiance arriving along `ray` for radiance-probe creation:
// surface radiance (or light emission for rays that escape the scene),
// attenuated by the volume transmittance *T, plus in-scattered volume
// radiance. `isect` and `T` are optional out-parameters; local storage is
// substituted when the caller passes NULL.
Spectrum CreateRadianceProbes::Li(const Scene *scene,
        const RayDifferential &ray, const Sample *sample, RNG &rng,
        MemoryArena &arena, Intersection *isect, Spectrum *T) const {
    Assert(ray.time == sample->time);
    // Fall back to stack storage for outputs the caller didn't request.
    Spectrum localT;
    if (!T) T = &localT;
    Intersection localIsect;
    if (!isect) isect = &localIsect;
    Assert(!ray.HasNaNs());
    Spectrum Lo = 0.f;
    if (scene->Intersect(ray, isect))
        Lo = surfaceIntegrator->Li(scene, this, ray, *isect, sample,
            rng, arena);
    else {
        // Ray left the scene: accumulate emission from all lights
        // (only infinite/background lights return nonzero Le here).
        for (uint32_t i = 0; i < scene->lights.size(); ++i)
            Lo += scene->lights[i]->Le(ray);
    }
    // Volume integrator also fills in the beam transmittance *T.
    Spectrum Lv = volumeIntegrator->Li(scene, this, ray, sample, rng,
        T, arena);
    return *T * Lo + Lv;
}
/* Handles an irradiance cache miss: samples the hemisphere above the hit
   point with stratified recursive radiance queries, inserts the resulting
   record into the cache, and returns its irradiance in `E`.
   The hemisphere sampler and the independent sampler are created lazily and
   reused via m_hemisphereSampler/m_sampleGenerator (presumably thread-local
   holders — confirm; the .get()/.set() API suggests so). */
void handleMiss(RayDifferential ray, const RadianceQueryRecord &rRec,
        Spectrum &E) const {
    /* Handle an irradiance cache miss */
    HemisphereSampler *hs = m_hemisphereSampler.get();
    Sampler *sampler = m_sampleGenerator.get();
    RadianceQueryRecord rRec2;
    if (hs == NULL) {
        /* Lazy one-time setup: an independent sampler plus an
           m_resolution x 2*m_resolution hemisphere stratification */
        Properties props("independent");
        sampler = static_cast<Sampler *> (PluginManager::getInstance()->
            createObject(MTS_CLASS(Sampler), props));
        hs = new HemisphereSampler(m_resolution, 2 * m_resolution);
        m_hemisphereSampler.set(hs);
        m_sampleGenerator.set(sampler);
    }

    /* Generate stratified cosine-weighted samples and compute
       rotational + translational gradients */
    hs->generateDirections(rRec.its);
    sampler->generate(Point2i(0));

    for (unsigned int j=0; j<hs->getM(); j++) {
        for (unsigned int k=0; k<hs->getN(); k++) {
            HemisphereSampler::SampleEntry &entry = (*hs)(j, k);
            /* Default to infinity in case the query leaves dist unset */
            entry.dist = std::numeric_limits<Float>::infinity();
            rRec2.recursiveQuery(rRec,
                RadianceQueryRecord::ERadianceNoEmission
                | RadianceQueryRecord::EDistance);
            rRec2.extra = 1;
            rRec2.sampler = sampler;
            /* Shoot a secondary ray along the stratified direction */
            entry.L = m_subIntegrator->Li(
                RayDifferential(rRec.its.p, entry.d, ray.time), rRec2);
            entry.dist = rRec2.dist;
            sampler->advance();
        }
    }

    hs->process(rRec.its);

    /* Undo ray differential scaling done by the integrator */
    if (ray.hasDifferentials)
        ray.scaleDifferential(m_diffScaleFactor);

    /* Store the new record and report its irradiance */
    m_irrCache->put(ray, rRec.its, *hs);
    E = hs->getIrradiance();
}
// Returns the total radiance arriving along _ray_: surface radiance (or
// light emission when the ray escapes the scene), attenuated by the medium
// transmittance written to *T, plus in-scattered volume radiance.
// _isect_ and _T_ are optional outputs; local storage is used when NULL.
Spectrum SamplerRenderer::Li(const Scene *scene, const RayDifferential &ray,
        const Sample *sample, RNG &rng, MemoryArena &arena,
        Intersection *isect, Spectrum *T) const {
    Assert(ray.time == sample->time);
    Assert(!ray.HasNaNs());
    // Substitute stack-allocated outputs when the caller passed none
    Spectrum transmittanceStorage;
    Intersection isectStorage;
    if (T == NULL)
        T = &transmittanceStorage;
    if (isect == NULL)
        isect = &isectStorage;
    Spectrum surfaceRadiance(0.f);
    if (scene->Intersect(ray, isect)) {
        surfaceRadiance = surfaceIntegrator->Li(scene, this, ray, *isect,
                                                sample, rng, arena);
    } else {
        // Ray missed all geometry: sum emission from the scene's lights
        // (nonzero only for environment-style lights)
        for (uint32_t i = 0; i < scene->lights.size(); ++i)
            surfaceRadiance += scene->lights[i]->Le(ray);
    }
    // The volume integrator also computes the beam transmittance *T
    Spectrum volumeRadiance = volumeIntegrator->Li(scene, this, ray, sample,
                                                   rng, T, arena);
    return *T * surfaceRadiance + volumeRadiance;
}
/* Renders one image block. Two traversal orders (a prescribed point list or
   plain scanlines) times two modes (with or without per-pixel variance
   statistics) yield the four near-identical inner loops below. The per-sample
   draw order (lens, time, position) is part of the sampler contract and must
   not be reordered. */
void SampleIntegrator::renderBlock(const Scene *scene, const Camera *camera,
        Sampler *sampler, ImageBlock *block, const bool &stop,
        const std::vector<Point2i> *points) const {
    Point2 sample, lensSample;
    RayDifferential eyeRay;
    Float timeSample = 0;
    Spectrum spec;
    block->clear();
    RadianceQueryRecord rRec(scene, sampler);
    bool needsLensSample = camera->needsLensSample();
    bool needsTimeSample = camera->needsTimeSample();
    const TabulatedFilter *filter = camera->getFilm()->getTabulatedFilter();
    /* Ray differentials shrink with the square root of the sample count */
    Float scaleFactor = 1.0f/std::sqrt((Float) sampler->getSampleCount());

    if (points) {
        /* Use a prescribed traversal order (e.g. using a space-filling curve) */
        if (!block->collectStatistics()) {
            for (size_t i=0; i<points->size(); ++i) {
                Point2i offset = (*points)[i] + Vector2i(block->getOffset());
                if (stop)
                    break;
                sampler->generate();
                for (size_t j = 0; j<sampler->getSampleCount(); j++) {
                    rRec.newQuery(RadianceQueryRecord::ECameraRay,
                        camera->getMedium());
                    if (needsLensSample)
                        lensSample = rRec.nextSample2D();
                    if (needsTimeSample)
                        timeSample = rRec.nextSample1D();
                    sample = rRec.nextSample2D();
                    sample.x += offset.x; sample.y += offset.y;
                    camera->generateRayDifferential(sample,
                        lensSample, timeSample, eyeRay);
                    eyeRay.scaleDifferential(scaleFactor);
                    spec = Li(eyeRay, rRec);
                    block->putSample(sample, spec, rRec.alpha, filter);
                    sampler->advance();
                }
            }
        } else {
            Spectrum mean, meanSqr;
            for (size_t i=0; i<points->size(); ++i) {
                Point2i offset = (*points)[i] + Vector2i(block->getOffset());
                if (stop)
                    break;
                sampler->generate();
                mean = meanSqr = Spectrum(0.0f);
                for (size_t j = 0; j<sampler->getSampleCount(); j++) {
                    rRec.newQuery(RadianceQueryRecord::ECameraRay,
                        camera->getMedium());
                    if (needsLensSample)
                        lensSample = rRec.nextSample2D();
                    if (needsTimeSample)
                        timeSample = rRec.nextSample1D();
                    sample = rRec.nextSample2D();
                    sample.x += offset.x; sample.y += offset.y;
                    camera->generateRayDifferential(sample,
                        lensSample, timeSample, eyeRay);
                    eyeRay.scaleDifferential(scaleFactor);
                    spec = Li(eyeRay, rRec);

                    /* Numerically robust online variance estimation using an
                       algorithm proposed by Donald Knuth (TAOCP vol.2, 3rd ed., p.232)
                       NOTE(review): at j == 0 the divisor below is zero; meanSqr
                       is also zero there, so this relies on 0/0 handling —
                       confirm intended */
                    const Spectrum delta = spec - mean;
                    mean += delta / ((Float) j+1);
                    meanSqr += delta * (spec - mean);

                    block->putSample(sample, spec, rRec.alpha, filter);
                    block->setVariance(offset.x, offset.y,
                        meanSqr / (Float) j, (int) j+1);
                    sampler->advance();
                }
            }
        }
    } else {
        /* Simple scanline traversal order */
        const int
            sx = block->getOffset().x,
            sy = block->getOffset().y,
            ex = sx + block->getSize().x,
            ey = sy + block->getSize().y;
        if (!block->collectStatistics()) {
            for (int y = sy; y < ey; y++) {
                for (int x = sx; x < ex; x++) {
                    if (stop)
                        break;
                    sampler->generate();
                    for (size_t j = 0; j<sampler->getSampleCount(); j++) {
                        rRec.newQuery(RadianceQueryRecord::ECameraRay,
                            camera->getMedium());
                        if (needsLensSample)
                            lensSample = rRec.nextSample2D();
                        if (needsTimeSample)
                            timeSample = rRec.nextSample1D();
                        sample = rRec.nextSample2D();
                        sample.x += x; sample.y += y;
                        camera->generateRayDifferential(sample,
                            lensSample, timeSample, eyeRay);
                        eyeRay.scaleDifferential(scaleFactor);
                        spec = Li(eyeRay, rRec);
                        block->putSample(sample, spec, rRec.alpha, filter);
                        sampler->advance();
                    }
                }
            }
        } else {
            Spectrum mean, meanSqr;
            for (int y = sy; y < ey; y++) {
                for (int x = sx; x < ex; x++) {
                    if (stop)
                        break;
                    sampler->generate();
                    mean = meanSqr = Spectrum(0.0f);
                    for (size_t j = 0; j<sampler->getSampleCount(); j++) {
                        rRec.newQuery(RadianceQueryRecord::ECameraRay,
                            camera->getMedium());
                        if (needsLensSample)
                            lensSample = rRec.nextSample2D();
                        if (needsTimeSample)
                            timeSample = rRec.nextSample1D();
                        sample = rRec.nextSample2D();
                        sample.x += x; sample.y += y;
                        camera->generateRayDifferential(sample,
                            lensSample, timeSample, eyeRay);
                        eyeRay.scaleDifferential(scaleFactor);
                        spec = Li(eyeRay, rRec);

                        /* Numerically robust online variance estimation using an
                           algorithm proposed by Donald Knuth (TAOCP vol.2, 3rd ed., p.232) */
                        const Spectrum delta = spec - mean;
                        mean += delta / ((Float) j+1);
                        meanSqr += delta * (spec - mean);

                        block->putSample(sample, spec, rRec.alpha, filter);
                        block->setVariance(x, y, meanSqr / (Float) j, (int) j+1);
                        sampler->advance();
                    }
                }
            }
        }
    }
}
// SamplerIntegrator Method Definitions

// Top-level render loop: splits the film's sample bounds into 16x16 tiles,
// renders each tile in parallel with a per-tile sampler clone and memory
// arena, merges the finished tiles into the film, and writes the image.
void SamplerIntegrator::Render(const Scene &scene) {
    ProfilePhase p(Prof::IntegratorRender);
    Preprocess(scene, *sampler);
    // Render image tiles in parallel

    // Compute number of tiles, _nTiles_, to use for parallel rendering
    Bounds2i sampleBounds = camera->film->GetSampleBounds();
    Vector2i sampleExtent = sampleBounds.Diagonal();
    const int tileSize = 16;
    Point2i nTiles((sampleExtent.x + tileSize - 1) / tileSize,
                   (sampleExtent.y + tileSize - 1) / tileSize);
    ProgressReporter reporter(nTiles.x * nTiles.y, "Rendering");
    {
        StatTimer timer(&renderingTime);
        ParallelFor2D([&](Point2i tile) {
            // Render section of image corresponding to _tile_

            // Allocate _MemoryArena_ for tile
            MemoryArena arena;

            // Get sampler instance for tile; the seed makes each tile's
            // sample sequence deterministic and distinct
            int seed = tile.y * nTiles.x + tile.x;
            std::unique_ptr<Sampler> tileSampler = sampler->Clone(seed);

            // Compute sample bounds for tile (clamped to the film bounds)
            int x0 = sampleBounds.pMin.x + tile.x * tileSize;
            int x1 = std::min(x0 + tileSize, sampleBounds.pMax.x);
            int y0 = sampleBounds.pMin.y + tile.y * tileSize;
            int y1 = std::min(y0 + tileSize, sampleBounds.pMax.y);
            Bounds2i tileBounds(Point2i(x0, y0), Point2i(x1, y1));

            // Get _FilmTile_ for tile
            std::unique_ptr<FilmTile> filmTile =
                camera->film->GetFilmTile(tileBounds);

            // Loop over pixels in tile to render them
            for (Point2i pixel : tileBounds) {
                {
                    ProfilePhase pp(Prof::StartPixel);
                    tileSampler->StartPixel(pixel);
                }
                do {
                    // Initialize _CameraSample_ for current sample
                    CameraSample cameraSample =
                        tileSampler->GetCameraSample(pixel);

                    // Generate camera ray for current sample
                    RayDifferential ray;
                    Float rayWeight =
                        camera->GenerateRayDifferential(cameraSample, &ray);
                    // Spread differentials to the per-pixel sample spacing
                    ray.ScaleDifferentials(
                        1 / std::sqrt((Float)tileSampler->samplesPerPixel));
                    ++nCameraRays;

                    // Evaluate radiance along camera ray
                    Spectrum L(0.f);
                    if (rayWeight > 0) L = Li(ray, scene, *tileSampler, arena);

                    // Issue warning if unexpected radiance value returned
                    if (L.HasNaNs()) {
                        Error(
                            "Not-a-number radiance value returned "
                            "for image sample. Setting to black.");
                        L = Spectrum(0.f);
                    } else if (L.y() < -1e-5) {
                        Error(
                            "Negative luminance value, %f, returned "
                            "for image sample. Setting to black.",
                            L.y());
                        L = Spectrum(0.f);
                    } else if (std::isinf(L.y())) {
                        Error(
                            "Infinite luminance value returned "
                            "for image sample. Setting to black.");
                        L = Spectrum(0.f);
                    }

                    // Add camera ray's contribution to image
                    filmTile->AddSample(cameraSample.pFilm, L, rayWeight);

                    // Free _MemoryArena_ memory from computing image sample
                    // value
                    arena.Reset();
                } while (tileSampler->StartNextSample());
            }

            // Merge image tile into _Film_
            camera->film->MergeFilmTile(std::move(filmTile));
            reporter.Update();
        }, nTiles);
        reporter.Done();
    }

    // Save final image after rendering
    camera->film->WriteImage();
}
/* Renders one image block while also accumulating auxiliary per-pixel
   decomposition buffers: direct lighting times albedo (LdA), per-lobe direct
   lighting (LdW[k]), and an albedo segmentation mask (imageSeg). The
   auxiliary buffers are averaged over the valid (finite, non-negative)
   samples only and dumped to .pfm files per block.
   NOTE(review): `width`, `LdA`, `LdW`, `imageSeg`, `prefix`, `m_numLobes`
   are members not visible here; the global index assumes row-major layout
   with stride `width`. */
void renderBlock(const Scene *scene, const Sensor *sensor, Sampler *sampler,
        ImageBlock *block, const bool &stop,
        const std::vector< TPoint2<uint8_t> > &points) const {
    Float diffScaleFactor = 1.0f / std::sqrt((Float)sampler->getSampleCount());
    bool needsApertureSample = sensor->needsApertureSample();
    bool needsTimeSample = sensor->needsTimeSample();
    RadianceQueryRecord rRec(scene, sampler);
    Point2 apertureSample(0.5f);
    Float timeSample = 0.5f;
    RayDifferential sensorRay;
    block->clear();
    uint32_t queryType = RadianceQueryRecord::ESensorRay;
    if (!sensor->getFilm()->hasAlpha())
        /* Don't compute an alpha channel if we don't have to */
        queryType &= ~RadianceQueryRecord::EOpacity;
    for (size_t i = 0; i < points.size(); ++i) {
        Point2i offset = Point2i(points[i]) + Vector2i(block->getOffset());
        /* Index into the full-image auxiliary buffers */
        int index = offset.x + offset.y * width;
        if (stop) break;
        sampler->generate(offset);
        /* Counters of valid samples used to normalize the aux buffers */
        Float cntLdA = 0.f;
        std::vector<Float> cntLdW(m_numLobes, 0.f);
        for (size_t j = 0; j < sampler->getSampleCount(); j++) {
            rRec.newQuery(queryType, sensor->getMedium());
            Point2 samplePos(Point2(offset) + Vector2(rRec.nextSample2D()));
            if (needsApertureSample)
                apertureSample = rRec.nextSample2D();
            if (needsTimeSample)
                timeSample = rRec.nextSample1D();
            Spectrum spec = sensor->sampleRayDifferential(
                sensorRay, samplePos, apertureSample, timeSample);
            sensorRay.scaleDifferential(diffScaleFactor);
            /* Per-sample decomposition outputs filled by Li() */
            Spectrum oneTdA(0.f);
            Spectrum oneLdA(0.f);
            std::vector<Spectrum> oneTdW(m_numLobes, Spectrum(0.f));
            std::vector<Spectrum> oneLdW(m_numLobes, Spectrum(0.f));
            int albedoSegs = 0;
            spec *= Li(sensorRay, rRec, oneTdA, oneLdA, oneTdW, oneLdW,
                albedoSegs);
            block->put(samplePos, spec, rRec.alpha);
            /* Reject non-finite or negative aux samples before accumulating */
            bool goodSample = true;
            for (int c = 0; c < 3; c++) {
                if (!std::isfinite(oneLdA[c]) || oneLdA[c] < 0) {
                    goodSample = false;
                    break;
                }
            }
            if (goodSample) {
                LdA[index] += oneLdA;
                cntLdA += 1.f;
            }
            for (int k = 0; k < m_numLobes; k++) {
                goodSample = true;
                for (int c = 0; c < 3; c++) {
                    if (!std::isfinite(oneLdW[k][c]) || oneLdW[k][c] < 0) {
                        goodSample = false;
                        break;
                    }
                }
                if (goodSample) {
                    LdW[k][index] += oneLdW[k];
                    cntLdW[k] += 1.f;
                }
            }
            imageSeg[index] |= albedoSegs;
            sampler->advance();
        }
        /* Normalize the aux buffers by the number of accepted samples */
        if (cntLdA > 0.f) {
            LdA[index] /= cntLdA;
        } else {
            LdA[index] = Spectrum(0.f);
        }
        for (int k = 0; k < m_numLobes; k++) {
            if (cntLdW[k] > 0.f) {
                LdW[k][index] /= cntLdW[k];
            } else {
                LdW[k][index] = Spectrum(0.f);
            }
        }
    }
    /* Dump the auxiliary buffers for this block to .pfm files.
       NOTE(review): `data` holds points.size()*3 floats but is indexed by
       3*localIndex+c with localIndex derived from block coordinates — this
       assumes `points` covers the full block; confirm, otherwise this can
       write out of bounds. */
    Float *data = new Float[(int)points.size() * 3];
    std::string outfile = prefix + formatString("LdA_%03i_%03i.pfm",
        block->getOffset().x, block->getOffset().y);
    for (int i = 0; i < points.size(); i++) {
        Point2i p = Point2i(points[i]);
        int localIndex = p.x + p.y * block->getWidth();
        Point2i offset = p + Vector2i(block->getOffset());
        int globalIndex = offset.x + offset.y * width;
        Spectrum color(LdA[globalIndex]);
        for (int c = 0; c < 3; c++) {
            data[3 * localIndex + c] = color[c];
        }
    }
    savePfm(outfile.c_str(), data, block->getWidth(), block->getHeight());
    for (int k = 0; k < m_numLobes; k++) {
        outfile = prefix + formatString("LdW_l%02i_%03i_%03i.pfm", k,
            block->getOffset().x, block->getOffset().y);
        for (int i = 0; i < points.size(); i++) {
            Point2i p = Point2i(points[i]);
            int localIndex = p.x + p.y * block->getWidth();
            Point2i offset = p + Vector2i(block->getOffset());
            int globalIndex = offset.x + offset.y * width;
            Spectrum color(LdW[k][globalIndex]);
            for (int c = 0; c < 3; c++) {
                data[3 * localIndex + c] = color[c];
            }
        }
        savePfm(outfile.c_str(), data, block->getWidth(), block->getHeight());
    }
    outfile = prefix + formatString("image_seg_%03i_%03i.pfm",
        block->getOffset().x, block->getOffset().y);
    for (int i = 0; i < points.size(); i++) {
        Point2i p = Point2i(points[i]);
        int localIndex = p.x + p.y * block->getWidth();
        Point2i offset = p + Vector2i(block->getOffset());
        int globalIndex = offset.x + offset.y * width;
        Spectrum color(imageSeg[globalIndex]);
        for (int c = 0; c < 3; c++) {
            data[3 * localIndex + c] = color[c];
        }
    }
    savePfm(outfile.c_str(), data, block->getWidth(), block->getHeight());
    /* outfile = formatString("TdA_%03i_%03i.pfm", block->getOffset().x,
    block->getOffset().y); for (int i = 0; i < points.size(); i++) { Point2i
    p = Point2i(points[i]); int localIndex = p.x + p.y * block->getWidth();
    Point2i offset = p + Vector2i(block->getOffset()); int globalIndex =
    offset.x + offset.y * width; Spectrum color(TdA[globalIndex] /
    Float(spp)); for (int c = 0; c < 3; c++) { data[3 * localIndex + c] =
    color[c]; } } savePfm(outfile.c_str(), data, block->getWidth(),
    block->getHeight()); */
    delete[] data;
}