void ArpMenuField::initialize() { if( ParamSet() ) { ParamSet()->AddParameters(parameters_menufield, &suite_menufield); PV_MenuBackColor.Init(ParamSet(), MenuBackColorP, ui_color(B_PANEL_BACKGROUND_COLOR), "MenuBackColor"); rgb_color black; black.red = black.green = black.blue = 0; black.alpha = 255; PV_MenuForeColor.Init(ParamSet(), MenuForeColorP, black, "MenuTextColor"); PV_MenuFont.Init(ParamSet(), MenuFontP, *BasicFont(), "PlainFont"); } SetBodyFill(ArpWest); }
void ArpTextControl::initialize() { if( ParamSet() ) { ParamSet()->AddParameters(parameters_textcontrol, &suite_textcontrol); PV_FillBackColor.Init(ParamSet(), FillBackColorP, tint_color( ui_color(B_PANEL_BACKGROUND_COLOR), B_LIGHTEN_MAX_TINT ), "FillBackColor"); rgb_color black; black.red = black.green = black.blue = 0; black.alpha = 255; PV_FillForeColor.Init(ParamSet(), FillForeColorP, black, "FillTextColor"); PV_FillFont.Init(ParamSet(), FillFontP, *BasicFont(), "PlainFont"); PV_MinTextString.Init(ParamSet(), MinTextStringP, ""); PV_PrefTextString.Init(ParamSet(), PrefTextStringP, ""); PV_MaxTextString.Init(ParamSet(), MaxTextStringP, ""); } SetBodyFill(ArpWest); }
// Payload for a "request URL" action: the target address, an optional
// message, and extra parameters forwarded with the request.  All
// arguments default to empty values.
RequestUrlActionData(QString address = QString(), QString message = QString(),
                     ParamSet params = ParamSet())
    : _address(address), _message(message), _params(params) { }
// Renders _scene_ with bidirectional path tracing: the image is split
// into 16x16 tiles that are rendered in parallel; for each sample a
// camera subpath and a light subpath are traced and all valid (s, t)
// connection strategies are combined with MIS.  Optionally writes one
// debug image per strategy.
//
// NOTE(review): another definition of BDPTIntegrator::Render appears
// later in this file — confirm which version is intended to be built.
void BDPTIntegrator::Render(const Scene &scene) {
    std::unique_ptr<LightDistribution> lightDistribution =
        CreateLightSampleDistribution(lightSampleStrategy, scene);

    // Compute a reverse mapping from light pointers to offsets into the
    // scene lights vector (and, equivalently, offsets into
    // lightDistr). Added after book text was finalized; this is critical
    // to reasonable performance with 100s+ of light sources.
    std::unordered_map<const Light *, size_t> lightToIndex;
    for (size_t i = 0; i < scene.lights.size(); ++i)
        lightToIndex[scene.lights[i].get()] = i;

    // Partition the image into tiles
    Film *film = camera->film;
    const Bounds2i sampleBounds = film->GetSampleBounds();
    const Vector2i sampleExtent = sampleBounds.Diagonal();
    const int tileSize = 16;
    // Round up so partial tiles at the right/bottom edges are covered.
    const int nXTiles = (sampleExtent.x + tileSize - 1) / tileSize;
    const int nYTiles = (sampleExtent.y + tileSize - 1) / tileSize;
    ProgressReporter reporter(nXTiles * nYTiles, "Rendering");

    // Allocate buffers for debug visualization, one Film per valid
    // (s, t) strategy up to maxDepth.
    const int bufferCount = (1 + maxDepth) * (6 + maxDepth) / 2;
    std::vector<std::unique_ptr<Film>> weightFilms(bufferCount);
    if (visualizeStrategies || visualizeWeights) {
        for (int depth = 0; depth <= maxDepth; ++depth) {
            for (int s = 0; s <= depth + 2; ++s) {
                int t = depth + 2 - s;
                // Skip strategies with no camera vertex, and the
                // degenerate s==1, t==1 case.
                if (t == 0 || (s == 1 && t == 1)) continue;
                std::string filename =
                    StringPrintf("bdpt_d%02i_s%02i_t%02i.exr", depth, s, t);
                weightFilms[BufferIndex(s, t)] = std::unique_ptr<Film>(new Film(
                    film->fullResolution,
                    Bounds2f(Point2f(0, 0), Point2f(1, 1)),
                    std::unique_ptr<Filter>(CreateBoxFilter(ParamSet())),
                    film->diagonal * 1000, filename, 1.f));
            }
        }
    }

    // Render and write the output image to disk
    if (scene.lights.size() > 0) {
        ParallelFor2D([&](const Point2i tile) {
            // Render a single tile using BDPT
            MemoryArena arena;
            // Seed the sampler per tile so results are deterministic
            // regardless of thread scheduling.
            int seed = tile.y * nXTiles + tile.x;
            std::unique_ptr<Sampler> tileSampler = sampler->Clone(seed);
            int x0 = sampleBounds.pMin.x + tile.x * tileSize;
            int x1 = std::min(x0 + tileSize, sampleBounds.pMax.x);
            int y0 = sampleBounds.pMin.y + tile.y * tileSize;
            int y1 = std::min(y0 + tileSize, sampleBounds.pMax.y);
            Bounds2i tileBounds(Point2i(x0, y0), Point2i(x1, y1));
            std::unique_ptr<FilmTile> filmTile =
                camera->film->GetFilmTile(tileBounds);
            for (Point2i pPixel : tileBounds) {
                tileSampler->StartPixel(pPixel);
                if (!InsideExclusive(pPixel, pixelBounds)) continue;
                do {
                    // Generate a single sample using BDPT
                    Point2f pFilm = (Point2f)pPixel + tileSampler->Get2D();

                    // Trace the camera subpath
                    Vertex *cameraVertices = arena.Alloc<Vertex>(maxDepth + 2);
                    Vertex *lightVertices = arena.Alloc<Vertex>(maxDepth + 1);
                    int nCamera = GenerateCameraSubpath(
                        scene, *tileSampler, arena, maxDepth + 2, *camera,
                        pFilm, cameraVertices);
                    // Get a distribution for sampling the light at the
                    // start of the light subpath. Because the light path
                    // follows multiple bounces, basing the sampling
                    // distribution on any of the vertices of the camera
                    // path is unlikely to be a good strategy. We use the
                    // PowerLightDistribution by default here, which
                    // doesn't use the point passed to it.
                    const Distribution1D *lightDistr =
                        lightDistribution->Lookup(cameraVertices[0].p());
                    // Now trace the light subpath
                    int nLight = GenerateLightSubpath(
                        scene, *tileSampler, arena, maxDepth + 1,
                        cameraVertices[0].time(), *lightDistr, lightToIndex,
                        lightVertices);

                    // Execute all BDPT connection strategies
                    Spectrum L(0.f);
                    for (int t = 1; t <= nCamera; ++t) {
                        for (int s = 0; s <= nLight; ++s) {
                            int depth = t + s - 2;
                            if ((s == 1 && t == 1) || depth < 0 ||
                                depth > maxDepth)
                                continue;
                            // Execute the $(s, t)$ connection strategy and
                            // update _L_
                            Point2f pFilmNew = pFilm;
                            Float misWeight = 0.f;
                            Spectrum Lpath = ConnectBDPT(
                                scene, lightVertices, cameraVertices, s, t,
                                *lightDistr, lightToIndex, *camera,
                                *tileSampler, &pFilmNew, &misWeight);
                            VLOG(2) << "Connect bdpt s: " << s <<", t: " << t
                                << ", Lpath: " << Lpath
                                << ", misWeight: " << misWeight;
                            if (visualizeStrategies || visualizeWeights) {
                                Spectrum value;
                                if (visualizeStrategies)
                                    value =
                                        misWeight == 0 ? 0 : Lpath / misWeight;
                                if (visualizeWeights) value = Lpath;
                                weightFilms[BufferIndex(s, t)]->AddSplat(
                                    pFilmNew, value);
                            }
                            // t == 1 strategies land at a different film
                            // position, so they are splatted rather than
                            // added to this pixel's sample.
                            if (t != 1)
                                L += Lpath;
                            else
                                film->AddSplat(pFilmNew, Lpath);
                        }
                    }
                    VLOG(2) << "Add film sample pFilm: " << pFilm << ", L: "
                        << L << ", (y: " << L.y() << ")";
                    filmTile->AddSample(pFilm, L);
                    arena.Reset();
                } while (tileSampler->StartNextSample());
            }
            film->MergeFilmTile(std::move(filmTile));
            reporter.Update();
        }, Point2i(nXTiles, nYTiles));
        reporter.Done();
    }
    film->WriteImage(1.0f / sampler->samplesPerPixel);

    // Write buffers for debug visualization
    if (visualizeStrategies || visualizeWeights) {
        const Float invSampleCount = 1.0f / sampler->samplesPerPixel;
        for (size_t i = 0; i < weightFilms.size(); ++i)
            if (weightFilms[i]) weightFilms[i]->WriteImage(invSampleCount);
    }
}
// Payload for a "request task" action: the scheduler to run against,
// the task id, parameters that override the task's own, and whether to
// force execution.  All arguments have empty/false defaults.
RequestTaskActionData(Scheduler *scheduler = 0, QString id = QString(),
                      ParamSet params = ParamSet(), bool force = false)
    : ActionData(scheduler), _id(id), _overridingParams(params),
      _force(force) { }
// Renders _scene_ with bidirectional path tracing over 16x16 tiles in
// parallel.  For each sample, a camera subpath and a light subpath are
// traced and every valid (s, t) connection strategy is evaluated and
// combined via MIS.  Optionally writes one debug image per strategy.
//
// NOTE(review): another definition of BDPTIntegrator::Render appears
// earlier in this file — confirm which version is intended to be built.
void BDPTIntegrator::Render(const Scene &scene) {
    ProfilePhase p(Prof::IntegratorRender);
    // Compute _lightDistr_ for sampling lights proportional to power
    std::unique_ptr<Distribution1D> lightDistr =
        ComputeLightPowerDistribution(scene);

    // Partition the image into tiles
    Film *film = camera->film;
    const Bounds2i sampleBounds = film->GetSampleBounds();
    const Vector2i sampleExtent = sampleBounds.Diagonal();
    const int tileSize = 16;
    // Round up so partial tiles at the right/bottom edges are covered.
    const int nXTiles = (sampleExtent.x + tileSize - 1) / tileSize;
    const int nYTiles = (sampleExtent.y + tileSize - 1) / tileSize;
    ProgressReporter reporter(nXTiles * nYTiles, "Rendering");

    // Allocate buffers for debug visualization, one Film per valid
    // (s, t) strategy up to maxDepth.
    const int bufferCount = (1 + maxDepth) * (6 + maxDepth) / 2;
    std::vector<std::unique_ptr<Film>> weightFilms(bufferCount);
    if (visualizeStrategies || visualizeWeights) {
        for (int depth = 0; depth <= maxDepth; ++depth) {
            for (int s = 0; s <= depth + 2; ++s) {
                int t = depth + 2 - s;
                // Skip strategies with no camera vertex, and the
                // degenerate s==1, t==1 case.
                if (t == 0 || (s == 1 && t == 1)) continue;
                std::string filename =
                    StringPrintf("bdpt_d%02i_s%02i_t%02i.exr", depth, s, t);
                weightFilms[BufferIndex(s, t)] = std::unique_ptr<Film>(new Film(
                    film->fullResolution,
                    Bounds2f(Point2f(0, 0), Point2f(1, 1)),
                    std::unique_ptr<Filter>(CreateBoxFilter(ParamSet())),
                    film->diagonal * 1000, filename, 1.f));
            }
        }
    }

    // Render and write the output image to disk
    if (scene.lights.size() > 0) {
        StatTimer timer(&renderingTime);
        ParallelFor2D([&](const Point2i tile) {
            // Render a single tile using BDPT
            MemoryArena arena;
            // Seed the sampler per tile so results are deterministic
            // regardless of thread scheduling.
            int seed = tile.y * nXTiles + tile.x;
            std::unique_ptr<Sampler> tileSampler = sampler->Clone(seed);
            int x0 = sampleBounds.pMin.x + tile.x * tileSize;
            int x1 = std::min(x0 + tileSize, sampleBounds.pMax.x);
            int y0 = sampleBounds.pMin.y + tile.y * tileSize;
            int y1 = std::min(y0 + tileSize, sampleBounds.pMax.y);
            Bounds2i tileBounds(Point2i(x0, y0), Point2i(x1, y1));
            std::unique_ptr<FilmTile> filmTile =
                camera->film->GetFilmTile(tileBounds);
            for (Point2i pPixel : tileBounds) {
                tileSampler->StartPixel(pPixel);
                if (!InsideExclusive(pPixel, pixelBounds)) continue;
                do {
                    // Generate a single sample using BDPT
                    Point2f pFilm = (Point2f)pPixel + tileSampler->Get2D();

                    // Trace the camera and light subpaths
                    Vertex *cameraVertices = arena.Alloc<Vertex>(maxDepth + 2);
                    Vertex *lightVertices = arena.Alloc<Vertex>(maxDepth + 1);
                    int nCamera = GenerateCameraSubpath(
                        scene, *tileSampler, arena, maxDepth + 2, *camera,
                        pFilm, cameraVertices);
                    int nLight = GenerateLightSubpath(
                        scene, *tileSampler, arena, maxDepth + 1,
                        cameraVertices[0].time(), *lightDistr, lightVertices);

                    // Execute all BDPT connection strategies
                    Spectrum L(0.f);
                    for (int t = 1; t <= nCamera; ++t) {
                        for (int s = 0; s <= nLight; ++s) {
                            int depth = t + s - 2;
                            if ((s == 1 && t == 1) || depth < 0 ||
                                depth > maxDepth)
                                continue;
                            // Execute the $(s, t)$ connection strategy and
                            // update _L_
                            Point2f pFilmNew = pFilm;
                            Float misWeight = 0.f;
                            Spectrum Lpath = ConnectBDPT(
                                scene, lightVertices, cameraVertices, s, t,
                                *lightDistr, *camera, *tileSampler, &pFilmNew,
                                &misWeight);
                            if (visualizeStrategies || visualizeWeights) {
                                Spectrum value;
                                if (visualizeStrategies)
                                    value =
                                        misWeight == 0 ? 0 : Lpath / misWeight;
                                if (visualizeWeights) value = Lpath;
                                weightFilms[BufferIndex(s, t)]->AddSplat(
                                    pFilmNew, value);
                            }
                            // t == 1 strategies land at a different film
                            // position, so they are splatted rather than
                            // added to this pixel's sample.
                            if (t != 1)
                                L += Lpath;
                            else
                                film->AddSplat(pFilmNew, Lpath);
                        }
                    }
                    filmTile->AddSample(pFilm, L);
                    arena.Reset();
                } while (tileSampler->StartNextSample());
            }
            film->MergeFilmTile(std::move(filmTile));
            reporter.Update();
        }, Point2i(nXTiles, nYTiles));
        reporter.Done();
    }
    film->WriteImage(1.0f / sampler->samplesPerPixel);

    // Write buffers for debug visualization
    if (visualizeStrategies || visualizeWeights) {
        const Float invSampleCount = 1.0f / sampler->samplesPerPixel;
        for (size_t i = 0; i < weightFilms.size(); ++i)
            if (weightFilms[i]) weightFilms[i]->WriteImage(invSampleCount);
    }
}
// Image Pipeline Function Definitions void ApplyImagingPipeline(float *rgb, int xResolution, int yResolution, float *yWeight, float bloomRadius, float bloomWeight, const char *toneMapName, const ParamSet *toneMapParams, float gamma, float dither, int maxDisplayValue) { int nPix = xResolution * yResolution ; // Possibly apply bloom effect to image if (bloomRadius > 0.f && bloomWeight > 0.f) { // Compute image-space extent of bloom effect int bloomSupport = Float2Int(bloomRadius * max(xResolution, yResolution)); int bloomWidth = bloomSupport / 2; // Initialize bloom filter table float *bloomFilter = new float[bloomWidth * bloomWidth]; for (int i = 0; i < bloomWidth * bloomWidth; ++i) { float dist = sqrtf(float(i)) / float(bloomWidth); bloomFilter[i] = powf(max(0.f, 1.f - dist), 4.f); } // Apply bloom filter to image pixels float *bloomImage = new float[3*nPix]; ProgressReporter prog(yResolution, "Bloom filter"); //NOBOOK for (int y = 0; y < yResolution; ++y) { for (int x = 0; x < xResolution; ++x) { // Compute bloom for pixel _(x,y)_ // Compute extent of pixels contributing bloom int x0 = max(0, x - bloomWidth); int x1 = min(x + bloomWidth, xResolution - 1); int y0 = max(0, y - bloomWidth); int y1 = min(y + bloomWidth, yResolution - 1); int offset = y * xResolution + x; float sumWt = 0.; for (int by = y0; by <= y1; ++by) for (int bx = x0; bx <= x1; ++bx) { // Accumulate bloom from pixel $(bx,by)$ int dx = x - bx, dy = y - by; if (dx == 0 && dy == 0) continue; int dist2 = dx*dx + dy*dy; if (dist2 < bloomWidth * bloomWidth) { int bloomOffset = bx + by * xResolution; float wt = bloomFilter[dist2]; sumWt += wt; for (int j = 0; j < 3; ++j) bloomImage[3*offset+j] += wt * rgb[3*bloomOffset+j]; } } bloomImage[3*offset ] /= sumWt; bloomImage[3*offset+1] /= sumWt; bloomImage[3*offset+2] /= sumWt; } prog.Update(); //NOBOOK } prog.Done(); //NOBOOK // Mix bloom effect into each pixel for (int i = 0; i < 3 * nPix; ++i) rgb[i] = Lerp(bloomWeight, rgb[i], bloomImage[i]); // Free 
memory allocated for bloom effect delete[] bloomFilter; delete[] bloomImage; } // Apply tone reproduction to image ToneMap *toneMap = NULL; if (toneMapName) toneMap = MakeToneMap(toneMapName, toneMapParams ? *toneMapParams : ParamSet()); if (toneMap) { float maxDisplayY = 100.f; float *scale = new float[nPix], *lum = new float[nPix]; // Compute pixel luminance values float stdYWeight[3] = { 0.212671f, 0.715160f, 0.072169f }; if (!yWeight) yWeight = stdYWeight; for (int i = 0; i < nPix; ++i) lum[i] = 683.f * (yWeight[0] * rgb[3*i] + yWeight[1] * rgb[3*i+1] + yWeight[2] * rgb[3*i+2]); toneMap->Map(lum, xResolution, yResolution, maxDisplayY, scale); // Apple scale to pixels for tone mapping and map to $[0,1]$ float displayTo01 = 683.f / maxDisplayY; for (int i = 0; i < xResolution * yResolution; ++i) { rgb[3*i ] *= scale[i] * displayTo01; rgb[3*i+1] *= scale[i] * displayTo01; rgb[3*i+2] *= scale[i] * displayTo01; } delete[] scale; delete[] lum; } // Handle out-of-gamut RGB values for (int i = 0; i < nPix; ++i) { float m = max(rgb[3*i], max(rgb[3*i+1], rgb[3*i+2])); if (m > 1.f) for (int j = 0; j < 3; ++j) rgb[3*i+j] /= m; } // Apply gamma correction to image if (gamma != 1.f) { float invGamma = 1.f / gamma; for (int i = 0; i < 3*nPix; ++i) rgb[i] = powf(rgb[i], invGamma); } // Map image to display range for (int i = 0; i < 3*nPix; ++i) rgb[i] *= maxDisplayValue; // Dither image if (dither > 0.f) for (int i = 0; i < 3*nPix; ++i) rgb[i] += 2.f * dither * (RandomFloat() - .5f); }
// Detach this control's parameters from its parameter set, when one
// is present.
ArpTextControl::~ArpTextControl()
{
	if (ParamSet() != 0) {
		ParamSet()->RemoveParameters(parameters_textcontrol);
	}
}
// Detach this menu field's parameters from its parameter set, when one
// is present.
ArpMenuField::~ArpMenuField()
{
	if (ParamSet() != 0) {
		ParamSet()->RemoveParameters(parameters_menufield);
	}
}
// Returns this subscription's parameter set; when no data object is
// attached, a default-constructed ParamSet is returned instead.
ParamSet AlertSubscription::params() const
{
	const AlertSubscriptionData *subscriptionData = data();
	if (subscriptionData == 0)
		return ParamSet();
	return subscriptionData->_params;
}