Code Example #1
File: zerotwosequence.cpp Project: Drooids/pbrt-v3
// ZeroTwoSequenceSampler Method Definitions
ZeroTwoSequenceSampler::ZeroTwoSequenceSampler(int64_t samplesPerPixel,
                                               int nSampledDimensions)
    : PixelSampler(RoundUpPow2(samplesPerPixel), nSampledDimensions) {
    if (!IsPowerOf2(samplesPerPixel))
        Warning(
            "Pixel samples being rounded up to power of 2 "
            "(from %" PRId64 " to %" PRId64 ").",
            samplesPerPixel, RoundUpPow2(samplesPerPixel));
}
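All of the snippets on this page lean on the same pair of power-of-two helpers. Their exact definitions live in each project's own utility headers; as a point of reference, the conventional bit-twiddling versions look roughly like the sketch below (an illustration only, not a verbatim copy from any of the projects listed here).

// Illustrative sketch of the helpers the examples on this page assume.
// Each project defines its own equivalents in its utility headers.
#include <cstdint>

inline bool IsPowerOf2(int64_t v) {
    // A power of two has exactly one bit set, so v & (v - 1) clears it.
    return v && !(v & (v - 1));
}

inline int64_t RoundUpPow2(int64_t v) {
    // Smear the highest set bit of (v - 1) into every lower position,
    // then add one to land on the next power of two. Values that are
    // already powers of two are returned unchanged.
    v--;
    v |= v >> 1;  v |= v >> 2;  v |= v >> 4;
    v |= v >> 8;  v |= v >> 16; v |= v >> 32;
    return v + 1;
}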
Code Example #2
File: igi.cpp Project: superBatbrat/pbrt-v1
// IGIIntegrator Implementation
IGIIntegrator::IGIIntegrator(int nl, int ns, float md,
		float rrt, float is) {
	nLightPaths = RoundUpPow2((u_int)nl);
	nLightSets = RoundUpPow2((u_int)ns);
	minDist2 = md * md;
	rrThreshold = rrt;
	indirectScale = is;
	maxSpecularDepth = 5;
	specularDepth = 0;
	virtualLights = new vector<VirtualLight>[nLightSets];
}
Code Example #3
File: lowdiscrepancy.cpp Project: 3dglazer/pbm
// LDSampler Method Definitions
LDSampler::LDSampler(int xstart, int xend, int ystart, int yend, int ps,
                     float sopen, float sclose)
    : Sampler(xstart, xend, ystart, yend, RoundUpPow2(ps), sopen, sclose) {
    xPos = xPixelStart;
    yPos = yPixelStart;
    if (!IsPowerOf2(ps)) {
        Warning("Pixel samples being rounded up to power of 2");
        nPixelSamples = RoundUpPow2(ps);
    } else
        nPixelSamples = ps;
    sampleBuf = NULL;
}
Code Example #4
MetropolisRenderer::MetropolisRenderer(int perPixelSamples,
        int nboot, int dps, float lsp, bool dds, int mr, int md,
        Camera *c, bool db) {
    camera = c;

    nPixelSamples = perPixelSamples;
    float largeStepProbability = lsp;
    largeStepsPerPixel = max(1u, RoundUpPow2(largeStepProbability * nPixelSamples));
    if (largeStepsPerPixel >= nPixelSamples) largeStepsPerPixel /= 2;
    Assert(largeStepsPerPixel >= 1 && largeStepsPerPixel < nPixelSamples);
    if ((nPixelSamples % largeStepsPerPixel) != 0) {
        int origPixelSamples = nPixelSamples;
        nPixelSamples += largeStepsPerPixel - (nPixelSamples % largeStepsPerPixel);
        Warning("Rounding up to %d Metropolis samples per pixel (from %d)",
                nPixelSamples, origPixelSamples);
    }

    nBootstrap = nboot;
    nDirectPixelSamples = dps;

    maxDepth = md;
    maxConsecutiveRejects = mr;
    nTasksFinished  = 0;
    directLighting = dds ? new DirectLightingIntegrator(SAMPLE_ALL_UNIFORM, maxDepth) : NULL;
    bidirectional = db;
}
Code Example #5
File: samplerrenderer.cpp Project: gmlealll/pbrt-v2
void SamplerRenderer::Render(const Scene *scene) {
    PBRT_FINISHED_PARSING();
    // Allow integrators to do pre-processing for the scene
    PBRT_STARTED_PREPROCESSING();
    surfaceIntegrator->Preprocess(scene, camera, this);
    volumeIntegrator->Preprocess(scene, camera, this);
    PBRT_FINISHED_PREPROCESSING();
    PBRT_STARTED_RENDERING();
    // Allocate and initialize _sample_
    Sample *sample = new Sample(sampler, surfaceIntegrator,
                                volumeIntegrator, scene);

    // Create and launch _SamplerRendererTask_s for rendering image

    // Compute number of _SamplerRendererTask_s to create for rendering
    int nPixels = camera->film->xResolution * camera->film->yResolution;
    int nTasks = max(32 * NumSystemCores(), nPixels / (16*16));
    nTasks = RoundUpPow2(nTasks);
    ProgressReporter reporter(nTasks, "Rendering");
    vector<Task *> renderTasks;
    for (int i = 0; i < nTasks; ++i)
        renderTasks.push_back(new SamplerRendererTask(scene, this, camera, sampler,
                                             reporter,
                                             sample, nTasks-1-i, nTasks));
    EnqueueTasks(renderTasks);
    WaitForAllTasks();
    for (uint32_t i = 0; i < renderTasks.size(); ++i)
        delete renderTasks[i];
    reporter.Done();
    PBRT_FINISHED_RENDERING();
    // Clean up after rendering and store final image
    delete sample;
    camera->film->WriteImage();
}
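Several of the renderer examples (#5, #6, #19, and #22) share the same task-count idiom: request at least 32 tasks per core and at least one task per 16x16 pixel tile, then round the result up to a power of two. The following trace of that arithmetic uses a core count and film resolution chosen purely for illustration, and reuses the RoundUpPow2 sketch shown after Code Example #1.

// Hypothetical numbers, just to trace the nTasks computation in Render() above.
// Assumes the RoundUpPow2 sketch from earlier on this page is in scope.
#include <algorithm>
#include <cstdio>

int main() {
    int numCores = 8;                           // assumed core count, not queried
    int xRes = 1024, yRes = 768;                // assumed film resolution
    int nPixels = xRes * yRes;                  // 786432 pixels
    int nTasks = std::max(32 * numCores,        // 256 tasks from the core count
                          nPixels / (16 * 16)); // 3072 tasks from 16x16 tiles
    nTasks = (int)RoundUpPow2(nTasks);          // 3072 rounds up to 4096
    std::printf("nTasks = %d\n", nTasks);       // prints 4096
    return 0;
}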
Code Example #6
File: CrossNLMFilter.cpp Project: alex310110/pbrt
CrossNLMFilter::CrossNLMFilter(
            int _searchRadius,
            int _patchRadius,            
            vector<float> &sigmaR,
            const Feature &sigmaF, 
            int w, int h
            ) {  
    searchRadius = _searchRadius;
    searchWidth = 2*searchRadius+1;
    patchRadius = _patchRadius;
    patchWidth = 2*patchRadius+1;
    invPatchWidth = 1.f/patchWidth;
    invPatchSize = 1.f/(float)(patchWidth*patchWidth);
    for(size_t i = 0; i < sigmaR.size(); i++) {
        scaleR.push_back(sigmaR[i] <= 0.f ? 
                0.f : -0.5f/(sigmaR[i]*sigmaR[i]));
    }
    for(int i = 0; i < sigmaF.Size(); i++) {
        scaleF[i] = sigmaF[i] <= 0.f ?
            0.f : -0.5f/(sigmaF[i]*sigmaF[i]);
    }
    width = w; height = h;
    int nPixels = width*height;
    nTasks = max(32 * NumSystemCores(), nPixels / (16*16));
    nTasks = RoundUpPow2(nTasks);
}
Code Example #7
File: light.cpp Project: AI42/OM3D
void Light::SHProject(const PbrtPoint &p, float pEpsilon, int lmax,
        const Scene *scene, bool computeLightVisibility, float time,
        RNG &rng, Spectrum *coeffs) const {
    for (int i = 0; i < SHTerms(lmax); ++i)
        coeffs[i] = 0.f;
    uint32_t ns = RoundUpPow2(nSamples);
    uint32_t scramble1D = rng.RandomUInt();
    uint32_t scramble2D[2] = { rng.RandomUInt(), rng.RandomUInt() };
    float *Ylm = ALLOCA(float, SHTerms(lmax));
    for (uint32_t i = 0; i < ns; ++i) {
        // Compute incident radiance sample from _light_, update SH _coeffs_
        float u[2], pdf;
        Sample02(i, scramble2D, u);
        LightSample lightSample(u[0], u[1], VanDerCorput(i, scramble1D));
        Vector wi;
        VisibilityTester vis;
        Spectrum Li = Sample_L(p, pEpsilon, lightSample, time, &wi, &pdf, &vis);
        if (!Li.IsBlack() && pdf > 0.f &&
            (!computeLightVisibility || vis.Unoccluded(scene))) {
            // Add light sample contribution to MC estimate of SH coefficients
            SHEvaluate(wi, lmax, Ylm);
            for (int j = 0; j < SHTerms(lmax); ++j)
                coeffs[j] += Li * Ylm[j] / (pdf * ns);
        }
    }
}
Code Example #8
File: video.cpp Project: IkarusDowned/ValyriaTear
StillImage VideoEngine::CaptureScreen() throw(Exception)
{
    // Static variable used to make sure the capture has a unique name in the texture image map
    static uint32 capture_id = 0;

    StillImage screen_image;

    // Retrieve width/height of the viewport. viewport_dimensions[2] is the width, [3] is the height
    GLint viewport_dimensions[4];
    glGetIntegerv(GL_VIEWPORT, viewport_dimensions);
    screen_image.SetDimensions((float)viewport_dimensions[2], (float)viewport_dimensions[3]);

    // Set up the screen rectangle to copy
    ScreenRect screen_rect(0, viewport_dimensions[3], viewport_dimensions[2], viewport_dimensions[3]);

    // Create a new ImageTexture with a unique filename for this newly captured screen
    ImageTexture *new_image = new ImageTexture("capture_screen" + NumberToString(capture_id), "<T>", viewport_dimensions[2], viewport_dimensions[3]);
    new_image->AddReference();

    // Create a texture sheet of an appropriate size that can retain the capture
    TexSheet *temp_sheet = TextureManager->_CreateTexSheet(RoundUpPow2(viewport_dimensions[2]), RoundUpPow2(viewport_dimensions[3]), VIDEO_TEXSHEET_ANY, false);
    VariableTexSheet *sheet = dynamic_cast<VariableTexSheet *>(temp_sheet);

    // Ensure that texture sheet creation succeeded, insert the texture image into the sheet, and copy the screen into the sheet
    if(sheet == NULL) {
        delete new_image;
        throw Exception("could not create texture sheet to store captured screen", __FILE__, __LINE__, __FUNCTION__);
        screen_image.Clear();
        return screen_image;
    }
    if(sheet->InsertTexture(new_image) == false) {
        TextureManager->_RemoveSheet(sheet);
        delete new_image;
        throw Exception("could not insert captured screen image into texture sheet", __FILE__, __LINE__, __FUNCTION__);
        screen_image.Clear();
        return screen_image;
    }
    if(sheet->CopyScreenRect(0, 0, screen_rect) == false) {
        TextureManager->_RemoveSheet(sheet);
        delete new_image;
        throw Exception("call to TexSheet::CopyScreenRect() failed", __FILE__, __LINE__, __FUNCTION__);
        screen_image.Clear();
        return screen_image;
    }

    // Store the image element to the saved image (with a flipped y axis)
    screen_image._image_texture = new_image;
    screen_image._texture = new_image;

    // Vertically flip the texture image by swapping the v coordinates, since OpenGL returns the image upside down in the CopyScreenRect call
    float temp = new_image->v1;
    new_image->v1 = new_image->v2;
    new_image->v2 = temp;

    ++capture_id;
    return screen_image;
}
Code Example #9
// LDSampler Method Definitions
LDSampler::LDSampler(int xstart, int xend,
		int ystart, int yend, int ps)
	: Sampler(xstart, xend, ystart, yend, RoundUpPow2(ps)) {
	xPos = xPixelStart - 1;
	yPos = yPixelStart;
	if (!IsPowerOf2(ps)) {
		Warning("Pixel samples being"
		        " rounded up to power of 2");
		pixelSamples = RoundUpPow2(ps);
	}
	else
		pixelSamples = ps;
	samplePos = pixelSamples;
	oneDSamples = twoDSamples = NULL;
	imageSamples = new float[5*pixelSamples];
	lensSamples = imageSamples + 2*pixelSamples;
	timeSamples = imageSamples + 4*pixelSamples;
	n1D = n2D = 0;
}
Code Example #10
// AdaptiveSampler Method Definitions
AdaptiveSampler::AdaptiveSampler(int xstart, int xend,
                     int ystart, int yend, int mins, int maxs, const string &m,
                     float sopen, float sclose)
    : Sampler(xstart, xend, ystart, yend, RoundUpPow2(max(mins, maxs)),
              sopen, sclose) {
    xPos = xPixelStart;
    yPos = yPixelStart;
    supersamplePixel = false;
    if (mins > maxs) std::swap(mins, maxs);

    if (!IsPowerOf2(mins)) {
        Warning("Minimum pixel samples being rounded up to power of 2");
        minSamples = RoundUpPow2(mins);
    }
    else
        minSamples = mins;
    if (!IsPowerOf2(maxs)) {
        Warning("Maximum pixel samples being rounded up to power of 2");
        maxSamples = RoundUpPow2(maxs);
    }
    else
        maxSamples = maxs;

    if (minSamples < 2) {
        Warning("Adaptive sampler needs at least two initial pixel samples.  Using two.");
        minSamples = 2;
    }
    if (minSamples == maxSamples) {
        maxSamples *= 2;
        Warning("Adaptive sampler must have more maximum samples than minimum.  Using %d - %d",
                minSamples, maxSamples);
    }
    if (m == "contrast") method = ADAPTIVE_CONTRAST_THRESHOLD;
    else if (m == "shapeid") method = ADAPTIVE_COMPARE_SHAPE_ID;
    else {
        Warning("Adaptive sampling metric \"%s\" unknown.  Using \"contrast\".",
                m.c_str());
        method = ADAPTIVE_CONTRAST_THRESHOLD;
    }
    sampleBuf = NULL;
}
Code Example #11
File: LifoAlloc.cpp Project: LyeSS/mozilla-central
BumpChunk *
BumpChunk::new_(size_t chunkSize)
{
    JS_ASSERT(RoundUpPow2(chunkSize) == chunkSize);
    void *mem = js_malloc(chunkSize);
    if (!mem)
        return NULL;
    BumpChunk *result = new (mem) BumpChunk(chunkSize - sizeof(BumpChunk));

    /* 
     * We assume that the alignment of sAlign is less than that of
     * the underlying memory allocator -- creating a new BumpChunk should
     * always satisfy the sAlign alignment constraint.
     */
    JS_ASSERT(AlignPtr(result->bump) == result->bump);
    return result;
}
Code Example #12
File: GLUploadHelpers.cpp Project: bgrins/gecko-dev
static void
TexImage2DHelper(GLContext* gl,
                 GLenum target, GLint level, GLint internalformat,
                 GLsizei width, GLsizei height, GLsizei stride,
                 GLint pixelsize, GLint border, GLenum format,
                 GLenum type, const GLvoid* pixels)
{
    if (gl->IsGLES()) {

        NS_ASSERTION(format == (GLenum)internalformat,
                    "format and internalformat not the same for glTexImage2D on GLES2");

        MOZ_ASSERT(width >= 0 && height >= 0);
        if (!CanUploadNonPowerOfTwo(gl)
            && (stride != width * pixelsize
            || !IsPowerOfTwo((uint32_t)width)
            || !IsPowerOfTwo((uint32_t)height))) {

            // Pad out texture width and height to the next power of two
            // as we don't support/want non power of two texture uploads
            GLsizei paddedWidth = RoundUpPow2((uint32_t)width);
            GLsizei paddedHeight = RoundUpPow2((uint32_t)height);

            GLvoid* paddedPixels = new unsigned char[paddedWidth * paddedHeight * pixelsize];

            // Pad out texture data to be in a POT sized buffer for uploading to
            // a POT sized texture
            CopyAndPadTextureData(pixels, paddedPixels, width, height,
                                  paddedWidth, paddedHeight, stride, pixelsize);

            gl->fPixelStorei(LOCAL_GL_UNPACK_ALIGNMENT,
                             std::min(GetAddressAlignment((ptrdiff_t)paddedPixels),
                                      GetAddressAlignment((ptrdiff_t)paddedWidth * pixelsize)));
            gl->fTexImage2D(target,
                            border,
                            internalformat,
                            paddedWidth,
                            paddedHeight,
                            border,
                            format,
                            type,
                            paddedPixels);
            gl->fPixelStorei(LOCAL_GL_UNPACK_ALIGNMENT, 4);

            delete[] static_cast<unsigned char*>(paddedPixels);
            return;
        }

        if (stride == width * pixelsize) {
            gl->fPixelStorei(LOCAL_GL_UNPACK_ALIGNMENT,
                             std::min(GetAddressAlignment((ptrdiff_t)pixels),
                                      GetAddressAlignment((ptrdiff_t)stride)));
            gl->fTexImage2D(target,
                            border,
                            internalformat,
                            width,
                            height,
                            border,
                            format,
                            type,
                            pixels);
            gl->fPixelStorei(LOCAL_GL_UNPACK_ALIGNMENT, 4);
        } else {
            // Use GLES-specific workarounds for GL_UNPACK_ROW_LENGTH; these are
            // implemented in TexSubImage2D.
            gl->fTexImage2D(target,
                            border,
                            internalformat,
                            width,
                            height,
                            border,
                            format,
                            type,
                            nullptr);
            TexSubImage2DHelper(gl,
                                target,
                                level,
                                0,
                                0,
                                width,
                                height,
                                stride,
                                pixelsize,
                                format,
                                type,
                                pixels);
        }
    } else {
        // desktop GL (non-ES) path

        gl->fPixelStorei(LOCAL_GL_UNPACK_ALIGNMENT,
                         std::min(GetAddressAlignment((ptrdiff_t)pixels),
                                  GetAddressAlignment((ptrdiff_t)stride)));
        int rowLength = stride/pixelsize;
        gl->fPixelStorei(LOCAL_GL_UNPACK_ROW_LENGTH, rowLength);
        gl->fTexImage2D(target,
                        level,
                        internalformat,
                        width,
                        height,
                        border,
                        format,
                        type,
                        pixels);
        gl->fPixelStorei(LOCAL_GL_UNPACK_ROW_LENGTH, 0);
        gl->fPixelStorei(LOCAL_GL_UNPACK_ALIGNMENT, 4);
    }
}
Code Example #13
File: video.cpp Project: IkarusDowned/ValyriaTear
StillImage VideoEngine::CreateImage(ImageMemory *raw_image, const std::string &image_name, bool delete_on_exist) throw(Exception)
{
    //the returning image
    StillImage still_image;

    //check if the raw_image pointer is valid
    if(!raw_image)
    {
        throw Exception("raw_image is NULL, cannot create a StillImage", __FILE__, __LINE__, __FUNCTION__);
        return still_image;
    }

    still_image.SetDimensions(raw_image->width, raw_image->height);

    //Check to see if the image_name exists
    if(TextureManager->_IsImageTextureRegistered(image_name))
    {
        //if we are allowed to delete, then we remove the texture
        if(delete_on_exist)
        {
            ImageTexture* old = TextureManager->_GetImageTexture(image_name);
            TextureManager->_UnregisterImageTexture(old);
            if(old->RemoveReference())
                delete old;
        }
        else
        {
            throw Exception("image already exists in texture manager", __FILE__, __LINE__, __FUNCTION__);
            return still_image;
        }
    }

    //create a new texture image. the next few steps are similar to CaptureImage, so in the future
    // we may want to do a code-cleanup
    ImageTexture *new_image = new ImageTexture(image_name, "<T>", raw_image->width, raw_image->height);
    new_image->AddReference();
    // Create a texture sheet of an appropriate size that can retain the capture
    TexSheet *temp_sheet = TextureManager->_CreateTexSheet(RoundUpPow2(raw_image->width), RoundUpPow2(raw_image->height), VIDEO_TEXSHEET_ANY, false);
    VariableTexSheet *sheet = dynamic_cast<VariableTexSheet *>(temp_sheet);

    // Ensure that texture sheet creation succeeded, insert the texture image into the sheet, and copy the screen into the sheet
    if(sheet == NULL) {
        delete new_image;
        throw Exception("could not create texture sheet to store still image", __FILE__, __LINE__, __FUNCTION__);
        return still_image;
    }

    if(sheet->InsertTexture(new_image) == false)
    {
        TextureManager->_RemoveSheet(sheet);
        delete new_image;
        throw Exception("could not insert raw image into texture sheet", __FILE__, __LINE__, __FUNCTION__);
        return still_image;
    }

    if(sheet->CopyRect(0, 0, *raw_image) == false)
    {
        TextureManager->_RemoveSheet(sheet);
        delete new_image;
        throw Exception("call to TexSheet::CopyRect() failed", __FILE__, __LINE__, __FUNCTION__);
        still_image.Clear();
        return still_image;
    }

    // Store the image element to the saved image (with a flipped y axis)
    still_image._image_texture = new_image;
    still_image._texture = new_image;
    return still_image;
}
Code Example #14
File: text.cpp Project: AMDmi3/ValyriaTear
void TextSupervisor::_CacheGlyphs(const uint16 *text, FontProperties *fp)
{
    if(fp == NULL) {
        IF_PRINT_WARNING(VIDEO_DEBUG) << "FontProperties argument was null" << std::endl;
        return;
    }

    // Empty string means there are no glyphs to cache
    if(*text == 0) {
        return;
    }

    static const SDL_Color glyph_color = { 0xFF, 0xFF, 0xFF, 0xFF }; // Opaque white color
    static const uint16 fall_back_glyph = '?'; // If we can't cache a particular glyph, we fall back to this one

    TTF_Font *font = fp->ttf_font;
    SDL_Surface *initial = NULL;
    SDL_Surface *intermediary = NULL;
    int32 w, h;
    GLuint texture;

    // Go through each character in the string and cache those glyphs that have not already been cached
    for(const uint16 *character_ptr = text; *character_ptr != 0; ++character_ptr) {
        // A reference for legibility
        const uint16 &character = *character_ptr;

        // Update the glyph cache when needed
        if(character >= fp->glyph_cache->size())
            fp->glyph_cache->resize(character + 1, 0);

        // Check if the glyph is already cached. If so, move on to the next character
        if(fp->glyph_cache->at(character) != 0)
            continue;

        // Attempt to create the initial SDL_Surface that contains the rendered glyph
        initial = TTF_RenderGlyph_Blended(font, character, glyph_color);
        if(initial == NULL) {
            IF_PRINT_WARNING(VIDEO_DEBUG) << "call to TTF_RenderGlyph_Blended() failed, resorting to fall back glyph: '?'" << std::endl;
            initial = TTF_RenderGlyph_Blended(font, fall_back_glyph, glyph_color);
            if(initial == NULL) {
                IF_PRINT_WARNING(VIDEO_DEBUG) << "call to TTF_RenderGlyph_Blended() failed for fall back glyph, aborting glyph caching" << std::endl;
                return;
            }
        }

        w = RoundUpPow2(initial->w + 1);
        h = RoundUpPow2(initial->h + 1);

        intermediary = SDL_CreateRGBSurface(0, w, h, 32, RMASK, GMASK, BMASK, AMASK);
        if(intermediary == NULL) {
            SDL_FreeSurface(initial);
            IF_PRINT_WARNING(VIDEO_DEBUG) << "call to SDL_CreateRGBSurface() failed" << std::endl;
            return;
        }


        if(SDL_BlitSurface(initial, 0, intermediary, 0) < 0) {
            SDL_FreeSurface(initial);
            SDL_FreeSurface(intermediary);
            IF_PRINT_WARNING(VIDEO_DEBUG) << "call to SDL_BlitSurface() failed" << std::endl;
            return;
        }

        glGenTextures(1, &texture);
        TextureManager->_BindTexture(texture);


        SDL_LockSurface(intermediary);

        uint32 num_bytes = w * h * 4;
        for(uint32 j = 0; j < num_bytes; j += 4) {
            (static_cast<uint8 *>(intermediary->pixels))[j + 3] = (static_cast<uint8 *>(intermediary->pixels))[j + 2];
            (static_cast<uint8 *>(intermediary->pixels))[j + 0] = 0xff;
            (static_cast<uint8 *>(intermediary->pixels))[j + 1] = 0xff;
            (static_cast<uint8 *>(intermediary->pixels))[j + 2] = 0xff;
        }

        glTexImage2D(GL_TEXTURE_2D, 0, 4, w, h, 0, GL_RGBA, GL_UNSIGNED_BYTE, intermediary->pixels);
        SDL_UnlockSurface(intermediary);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

        if(VideoManager->CheckGLError()) {
            SDL_FreeSurface(initial);
            SDL_FreeSurface(intermediary);
            IF_PRINT_WARNING(VIDEO_DEBUG) << "an OpenGL error was detected: " << VideoManager->CreateGLErrorString() << std::endl;
            return;
        }

        int minx, maxx;
        int miny, maxy;
        int advance;
        if(TTF_GlyphMetrics(font, character, &minx, &maxx, &miny, &maxy, &advance) != 0) {
            SDL_FreeSurface(initial);
            SDL_FreeSurface(intermediary);
            IF_PRINT_WARNING(VIDEO_DEBUG) << "call to TTF_GlyphMetrics() failed" << std::endl;
            return;
        }

        FontGlyph *glyph = new FontGlyph;
        glyph->texture = texture;
        glyph->min_x = minx;
        glyph->min_y = miny;
        glyph->top_y = fp->ascent - maxy;
        glyph->width = initial->w + 1;
        glyph->height = initial->h + 1;
        glyph->max_x = static_cast<float>(initial->w + 1) / static_cast<float>(w);
        glyph->max_y = static_cast<float>(initial->h + 1) / static_cast<float>(h);
        glyph->advance = advance;

        (*fp->glyph_cache)[character] = glyph;

        SDL_FreeSurface(initial);
        SDL_FreeSurface(intermediary);
    }
} // void TextSupervisor::_CacheGlyphs(const uint16* text, FontProperties* fp)
Code Example #15
TexSheet* TextureController::_InsertImageInTexSheet(BaseTexture *image, ImageMemory& load_info, bool is_static) {
	// Image sizes larger than 512 in either dimension require their own texture sheet
	if (load_info.width > 512 || load_info.height > 512) {
		int32 round_width = RoundUpPow2(load_info.width);
		int32 round_height = RoundUpPow2(load_info.height);
		TexSheet* sheet = _CreateTexSheet(round_width, round_height, VIDEO_TEXSHEET_ANY, false);

		// Ran out of memory!
		if (sheet == NULL) {
			IF_PRINT_WARNING(VIDEO_DEBUG) << "could not create new texture sheet for image" << endl;
			return NULL;
		}

		if (sheet->AddTexture(image, load_info) == true)
			return sheet;
		else {
			IF_PRINT_WARNING(VIDEO_DEBUG) << "TexSheet::AddTexture returned false when trying to insert a large image" << endl;
			return NULL;
		}
	}

	// Determine the type of texture sheet that should hold this image
	TexSheetType type;

	if (load_info.width == 32 && load_info.height == 32)
		type = VIDEO_TEXSHEET_32x32;
	else if (load_info.width == 32 && load_info.height == 64)
		type = VIDEO_TEXSHEET_32x64;
	else if (load_info.width == 64 && load_info.height == 64)
		type = VIDEO_TEXSHEET_64x64;
	else
		type = VIDEO_TEXSHEET_ANY;

	// Look through all existing texture sheets and see if the image will fit in any of the ones which
	// match the type and static status that we are looking for
	for (uint32 i = 0; i < _tex_sheets.size(); i++) {
		TexSheet* sheet = _tex_sheets[i];
		if (sheet == NULL) {
			IF_PRINT_WARNING(VIDEO_DEBUG) << "found a NULL texture sheet in the _tex_sheets container" << endl;
			continue;
		}

		if (sheet->type == type && sheet->is_static == is_static) {
			if (sheet->AddTexture(image, load_info) == true) {
				return sheet;
			}
		}
	}

	// We couldn't add it to any existing sheets, so we must create a new one for it
	TexSheet *sheet = _CreateTexSheet(512, 512, type, is_static);
	if (sheet == NULL) {
		IF_PRINT_WARNING(VIDEO_DEBUG) << "failed to create a new texture sheet for image" << endl;
		return NULL;
	}

	// AddTexture should always work here. If not, there is a serious problem
	if (sheet->AddTexture(image, load_info)) {
		return sheet;
	}
	else {
		IF_PRINT_WARNING(VIDEO_DEBUG) << "all attempts to add image to a texture sheet have failed" << endl;
		return NULL;
	}
} // TexSheet* TextureController::_InsertImageInTexSheet(BaseImage *image, ImageMemory& load_info, bool is_static)
Code Example #16
void DipoleSubsurfaceIntegrator::Preprocess(const Scene *scene,
        const Camera *camera, const Renderer *renderer) {
    if (scene->lights.size() == 0) return;
    vector<SurfacePoint> pts;
    // Get _SurfacePoint_s for translucent objects in scene
    if (filename != "") {
        // Initialize _SurfacePoint_s from file
        vector<float> fpts;
        if (ReadFloatFile(filename.c_str(), &fpts)) {
            if ((fpts.size() % 8) != 0)
                Error("Excess values (%d) in points file \"%s\"", int(fpts.size() % 8),
                      filename.c_str());
            for (u_int i = 0; i < fpts.size(); i += 8)
                pts.push_back(SurfacePoint(Point(fpts[i], fpts[i+1], fpts[i+2]),
                                           Normal(fpts[i+3], fpts[i+4], fpts[i+5]),
                                           fpts[i+6], fpts[i+7]));
        }
    }
    if (pts.size() == 0) {
        Point pCamera = camera->CameraToWorld(camera->shutterOpen,
                                              Point(0, 0, 0));
        FindPoissonPointDistribution(pCamera, camera->shutterOpen,
                                     minSampleDist, scene, &pts);
    }

    // Compute irradiance values at sample points
    RNG rng;
    MemoryArena arena;
    PBRT_SUBSURFACE_STARTED_COMPUTING_IRRADIANCE_VALUES();
    ProgressReporter progress(pts.size(), "Computing Irradiances");
    for (uint32_t i = 0; i < pts.size(); ++i) {
        SurfacePoint &sp = pts[i];
        Spectrum E(0.f);
        for (uint32_t j = 0; j < scene->lights.size(); ++j) {
            // Add irradiance from light at point
            const Light *light = scene->lights[j];
            Spectrum Elight = 0.f;
            int nSamples = RoundUpPow2(light->nSamples);
            uint32_t scramble[2] = { rng.RandomUInt(), rng.RandomUInt() };
            uint32_t compScramble = rng.RandomUInt();
            for (int s = 0; s < nSamples; ++s) {
                float lpos[2];
                Sample02(s, scramble, lpos);
                float lcomp = VanDerCorput(s, compScramble);
                LightSample ls(lpos[0], lpos[1], lcomp);
                Vector wi;
                float lightPdf;
                VisibilityTester visibility;
                Spectrum Li = light->Sample_L(sp.p, sp.rayEpsilon,
                    ls, camera->shutterOpen, &wi, &lightPdf, &visibility);
                if (Dot(wi, sp.n) <= 0.) continue;
                if (Li.IsBlack() || lightPdf == 0.f) continue;
                Li *= visibility.Transmittance(scene, renderer, NULL, rng, arena);
                if (visibility.Unoccluded(scene))
                    Elight += Li * AbsDot(wi, sp.n) / lightPdf;
            }
            E += Elight / nSamples;
        }
        if (E.y() > 0.f)
        {
            irradiancePoints.push_back(IrradiancePoint(sp, E));
            PBRT_SUBSURFACE_COMPUTED_IRRADIANCE_AT_POINT(&sp, &E);
        }
        arena.FreeAll();
        progress.Update();
    }
    progress.Done();
    PBRT_SUBSURFACE_FINISHED_COMPUTING_IRRADIANCE_VALUES();

    // Create octree of clustered irradiance samples
    octree = octreeArena.Alloc<SubsurfaceOctreeNode>();
    for (uint32_t i = 0; i < irradiancePoints.size(); ++i)
        octreeBounds = Union(octreeBounds, irradiancePoints[i].p);
    for (uint32_t i = 0; i < irradiancePoints.size(); ++i)
        octree->Insert(octreeBounds, &irradiancePoints[i], octreeArena);
    octree->InitHierarchy();
}
Code Example #17
File: diffuseprt.cpp Project: jwzhang/pbrt-v2
// DiffusePRTIntegrator Method Definitions
DiffusePRTIntegrator::DiffusePRTIntegrator(int lm, int ns) {
    lmax = lm;
    nSamples = RoundUpPow2(ns);
    c_in = new Spectrum[SHTerms(lmax)];
}
Code Example #18
void
GLBlitTextureImageHelper::BlitTextureImage(TextureImage *aSrc, const gfx::IntRect& aSrcRect,
                                           TextureImage *aDst, const gfx::IntRect& aDstRect)
{
    GLContext *gl = mCompositor->gl();

    if (!aSrc || !aDst || aSrcRect.IsEmpty() || aDstRect.IsEmpty())
        return;

    int savedFb = 0;
    gl->fGetIntegerv(LOCAL_GL_FRAMEBUFFER_BINDING, &savedFb);

    ScopedGLState scopedScissorTestState(gl, LOCAL_GL_SCISSOR_TEST, false);
    ScopedGLState scopedBlendState(gl, LOCAL_GL_BLEND, false);

    // 2.0 means scale up by two
    float blitScaleX = float(aDstRect.width) / float(aSrcRect.width);
    float blitScaleY = float(aDstRect.height) / float(aSrcRect.height);

    // We start iterating over all destination tiles
    aDst->BeginBigImageIteration();
    do {
        // calculate portion of the tile that is going to be painted to
        gfx::IntRect dstSubRect;
        gfx::IntRect dstTextureRect = aDst->GetTileRect();
        dstSubRect.IntersectRect(aDstRect, dstTextureRect);

        // this tile is not part of the destination rectangle aDstRect
        if (dstSubRect.IsEmpty())
            continue;

        // (*) transform the rect of this tile into the rectangle defined by aSrcRect...
        gfx::IntRect dstInSrcRect(dstSubRect);
        dstInSrcRect.MoveBy(-aDstRect.TopLeft());
        // ...which might be of different size, hence scale accordingly
        dstInSrcRect.ScaleRoundOut(1.0f / blitScaleX, 1.0f / blitScaleY);
        dstInSrcRect.MoveBy(aSrcRect.TopLeft());

        SetBlitFramebufferForDestTexture(aDst->GetTextureID());
        UseBlitProgram();

        aSrc->BeginBigImageIteration();
        // now iterate over all tiles in the source Image...
        do {
            // calculate portion of the source tile that is in the source rect
            gfx::IntRect srcSubRect;
            gfx::IntRect srcTextureRect = aSrc->GetTileRect();
            srcSubRect.IntersectRect(aSrcRect, srcTextureRect);

            // this tile is not part of the source rect
            if (srcSubRect.IsEmpty()) {
                continue;
            }
            // calculate intersection of source rect with destination rect
            srcSubRect.IntersectRect(srcSubRect, dstInSrcRect);
            // this tile does not overlap the current destination tile
            if (srcSubRect.IsEmpty()) {
                continue;
            }
            // We now have the intersection of
            //     the current source tile
            // and the desired source rectangle
            // and the destination tile
            // and the desired destination rectangle,
            // all expressed in source space.
            // We need to transform this back into destination space, inverting the transform from (*)
            gfx::IntRect srcSubInDstRect(srcSubRect);
            srcSubInDstRect.MoveBy(-aSrcRect.TopLeft());
            srcSubInDstRect.ScaleRoundOut(blitScaleX, blitScaleY);
            srcSubInDstRect.MoveBy(aDstRect.TopLeft());

            // we transform these rectangles to be relative to the current src and dst tiles, respectively
            gfx::IntSize srcSize = srcTextureRect.Size();
            gfx::IntSize dstSize = dstTextureRect.Size();
            srcSubRect.MoveBy(-srcTextureRect.x, -srcTextureRect.y);
            srcSubInDstRect.MoveBy(-dstTextureRect.x, -dstTextureRect.y);

            float dx0 = 2.0f * float(srcSubInDstRect.x) / float(dstSize.width) - 1.0f;
            float dy0 = 2.0f * float(srcSubInDstRect.y) / float(dstSize.height) - 1.0f;
            float dx1 = 2.0f * float(srcSubInDstRect.x + srcSubInDstRect.width) / float(dstSize.width) - 1.0f;
            float dy1 = 2.0f * float(srcSubInDstRect.y + srcSubInDstRect.height) / float(dstSize.height) - 1.0f;
            ScopedViewportRect autoViewportRect(gl, 0, 0, dstSize.width, dstSize.height);

            RectTriangles rects;

            gfx::IntSize realTexSize = srcSize;
            if (!CanUploadNonPowerOfTwo(gl)) {
                realTexSize = gfx::IntSize(RoundUpPow2(srcSize.width),
                                           RoundUpPow2(srcSize.height));
            }

            if (aSrc->GetWrapMode() == LOCAL_GL_REPEAT) {
                rects.addRect(/* dest rectangle */
                        dx0, dy0, dx1, dy1,
                        /* tex coords */
                        srcSubRect.x / float(realTexSize.width),
                        srcSubRect.y / float(realTexSize.height),
                        srcSubRect.XMost() / float(realTexSize.width),
                        srcSubRect.YMost() / float(realTexSize.height));
            } else {
                DecomposeIntoNoRepeatTriangles(srcSubRect, realTexSize, rects);

                // now put the coords into the d[xy]0 .. d[xy]1 coordinate space
                // from the 0..1 that it comes out of decompose
                InfallibleTArray<RectTriangles::coord>& coords = rects.vertCoords();

                for (unsigned int i = 0; i < coords.Length(); ++i) {
                    coords[i].x = (coords[i].x * (dx1 - dx0)) + dx0;
                    coords[i].y = (coords[i].y * (dy1 - dy0)) + dy0;
                }
            }

            ScopedBindTextureUnit autoTexUnit(gl, LOCAL_GL_TEXTURE0);
            ScopedBindTexture autoTex(gl, aSrc->GetTextureID());
            ScopedVertexAttribPointer autoAttrib0(gl, 0, 2, LOCAL_GL_FLOAT, LOCAL_GL_FALSE, 0, 0, rects.vertCoords().Elements());
            ScopedVertexAttribPointer autoAttrib1(gl, 1, 2, LOCAL_GL_FLOAT, LOCAL_GL_FALSE, 0, 0, rects.texCoords().Elements());

            gl->fDrawArrays(LOCAL_GL_TRIANGLES, 0, rects.elements());

        } while (aSrc->NextTile());
    } while (aDst->NextTile());

    // unbind the previous texture from the framebuffer
    SetBlitFramebufferForDestTexture(0);

    gl->fBindFramebuffer(LOCAL_GL_FRAMEBUFFER, savedFb);
}
Code Example #19
void MetropolisRenderer::Render(const Scene *scene) {
    PBRT_MLT_STARTED_RENDERING();
    if (scene->lights.size() > 0) {
        int x0, x1, y0, y1;
        camera->film->GetPixelExtent(&x0, &x1, &y0, &y1);
        float t0 = camera->shutterOpen, t1 = camera->shutterClose;
        Distribution1D *lightDistribution = ComputeLightSamplingCDF(scene);

        if (directLighting != NULL) {
            PBRT_MLT_STARTED_DIRECTLIGHTING();
            // Compute direct lighting before Metropolis light transport
            if (nDirectPixelSamples > 0) {
                LDSampler sampler(x0, x1, y0, y1, nDirectPixelSamples, t0, t1);
                Sample *sample = new Sample(&sampler, directLighting, NULL, scene);
                vector<Task *> directTasks;
                int nDirectTasks = max(32 * NumSystemCores(),
                                 (camera->film->xResolution * camera->film->yResolution) / (16*16));
                nDirectTasks = RoundUpPow2(nDirectTasks);
                ProgressReporter directProgress(nDirectTasks, "Direct Lighting");
                for (int i = 0; i < nDirectTasks; ++i)
                    directTasks.push_back(new SamplerRendererTask(scene, this, camera, directProgress,
                                                                  &sampler, sample, false, i, nDirectTasks));
                std::reverse(directTasks.begin(), directTasks.end());
                EnqueueTasks(directTasks);
                WaitForAllTasks();
                for (uint32_t i = 0; i < directTasks.size(); ++i)
                    delete directTasks[i];
                delete sample;
                directProgress.Done();
            }
            camera->film->WriteImage();
            PBRT_MLT_FINISHED_DIRECTLIGHTING();
        }
        // Take initial set of samples to compute $b$
        PBRT_MLT_STARTED_BOOTSTRAPPING(nBootstrap);
        RNG rng(0);
        MemoryArena arena;
        vector<float> bootstrapI;
        vector<PathVertex> cameraPath(maxDepth, PathVertex());
        vector<PathVertex> lightPath(maxDepth, PathVertex());
        float sumI = 0.f;
        bootstrapI.reserve(nBootstrap);
        MLTSample sample(maxDepth);
        for (uint32_t i = 0; i < nBootstrap; ++i) {
            // Generate random sample and path radiance for MLT bootstrapping
            float x = Lerp(rng.RandomFloat(), x0, x1);
            float y = Lerp(rng.RandomFloat(), y0, y1);
            LargeStep(rng, &sample, maxDepth, x, y, t0, t1, bidirectional);
            Spectrum L = PathL(sample, scene, arena, camera, lightDistribution,
                               &cameraPath[0], &lightPath[0], rng);

            // Compute contribution for random sample for MLT bootstrapping
            float I = ::I(L);
            sumI += I;
            bootstrapI.push_back(I);
            arena.FreeAll();
        }
        float b = sumI / nBootstrap;
        PBRT_MLT_FINISHED_BOOTSTRAPPING(b);
        Info("MLT computed b = %f", b);

        // Select initial sample from bootstrap samples
        float contribOffset = rng.RandomFloat() * sumI;
        rng.Seed(0);
        sumI = 0.f;
        MLTSample initialSample(maxDepth);
        for (uint32_t i = 0; i < nBootstrap; ++i) {
            float x = Lerp(rng.RandomFloat(), x0, x1);
            float y = Lerp(rng.RandomFloat(), y0, y1);
            LargeStep(rng, &initialSample, maxDepth, x, y, t0, t1,
                      bidirectional);
            sumI += bootstrapI[i];
            if (sumI > contribOffset)
                break;
        }

        // Launch tasks to generate Metropolis samples
        uint32_t nTasks = largeStepsPerPixel;
        uint32_t largeStepRate = nPixelSamples / largeStepsPerPixel;
        Info("MLT running %d tasks, large step rate %d", nTasks, largeStepRate);
        ProgressReporter progress(nTasks * largeStepRate, "Metropolis");
        vector<Task *> tasks;
        Mutex *filmMutex = Mutex::Create();
        Assert(IsPowerOf2(nTasks));
        uint32_t scramble[2] = { rng.RandomUInt(), rng.RandomUInt() };
        uint32_t pfreq = (x1-x0) * (y1-y0);
        for (uint32_t i = 0; i < nTasks; ++i) {
            float d[2];
            Sample02(i, scramble, d);
            tasks.push_back(new MLTTask(progress, pfreq, i,
                d[0], d[1], x0, x1, y0, y1, t0, t1, b, initialSample,
                scene, camera, this, filmMutex, lightDistribution));
        }
        EnqueueTasks(tasks);
        WaitForAllTasks();
        for (uint32_t i = 0; i < tasks.size(); ++i)
            delete tasks[i];
        progress.Done();
        Mutex::Destroy(filmMutex);
        delete lightDistribution;
    }
    camera->film->WriteImage();
    PBRT_MLT_FINISHED_RENDERING();
}
Code Example #20
// DiffusePRTIntegrator Method Definitions
DiffusePRTIntegrator::DiffusePRTIntegrator(int lm, int ns)
    : lmax(lm), nSamples(RoundUpPow2(ns)) {
    c_in = new Spectrum[SHTerms(lmax)];
}
Code Example #21
	int RoundSize(int size) const {
		return RoundUpPow2(size);
	}
Code Example #22
File: metropolis.cpp Project: gmlealll/pbrt-v2
void MetropolisRenderer::Render(const Scene *scene) {
    int x0, x1, y0, y1;
    camera->film->GetPixelExtent(&x0, &x1, &y0, &y1);
    int nPixels = (x1-x0) * (y1-y0);
    float t0 = camera->shutterOpen;
    float t1 = camera->shutterClose;

    if (doDirectSeparately) {
        // Compute direct lighting before Metropolis light transport
        LDSampler sampler(x0, x1, y0, y1, directPixelSamples, t0, t1);
        Sample *sample = new Sample(&sampler, directLighting, NULL, scene);
        vector<Task *> directTasks;
        int nDirectTasks = max(32 * NumSystemCores(),
                         (camera->film->xResolution * camera->film->yResolution) / (16*16));
        nDirectTasks = RoundUpPow2(nDirectTasks);
        ProgressReporter directProgress(nDirectTasks, "Direct Lighting");
        for (int i = 0; i < nDirectTasks; ++i)
            directTasks.push_back(new SamplerRendererTask(scene, this, camera,
                &sampler, directProgress, sample, i, nDirectTasks));
        std::reverse(directTasks.begin(), directTasks.end());
        EnqueueTasks(directTasks);
        WaitForAllTasks();
        for (uint32_t i = 0; i < directTasks.size(); ++i)
            delete directTasks[i];
        delete sample;
        directProgress.Done();
    }
    // Take initial set of samples to compute $b$
    RNG rng(0);
    MemoryArena arena;
    vector<float> bootstrapSamples;
    float sumContrib = 0.f;
    bootstrapSamples.reserve(nBootstrap);
    MLTSample sample(maxDepth);
    for (int i = 0; i < nBootstrap; ++i) {
        // Compute contribution for random sample for MLT bootstrapping
        LargeStep(rng, &sample, maxDepth, x0, x1, y0, y1, t0, t1);
        float contrib = I(L(scene, this, camera, arena, rng, maxDepth,
                            doDirectSeparately, sample), sample);
        sumContrib += contrib;
        bootstrapSamples.push_back(contrib);
        arena.FreeAll();
    }
    float b = sumContrib / nBootstrap;

    // Select initial sample from bootstrap samples
    rng.Seed(0);
    float contribOffset = rng.RandomFloat() * sumContrib;
    sumContrib = 0.f;
    MLTSample initialSample(maxDepth);
    for (int i = 0; i < nBootstrap; ++i) {
        LargeStep(rng, &initialSample, maxDepth, x0, x1, y0, y1, t0, t1);
        sumContrib += bootstrapSamples[i];
        if (contribOffset < sumContrib)
            break;
    }

    // Launch tasks to generate Metropolis samples
    if (scene->lights.size() > 0) {
        int nTasks = int(nSamples / 50000);
        nTasks = max(nTasks, 32 * NumSystemCores());
        nTasks = min(nTasks, 32768);
        nSamples = (nSamples / nTasks) * nTasks;
        ProgressReporter progress(nTasks, "Metropolis");
        vector<Task *> tasks;
        Mutex *filmMutex = Mutex::Create();
        for (int i = 0; i < nTasks; ++i)
            tasks.push_back(new MLTTask(progress, i, int(nSamples/nTasks), nSamples,
                nPixels, x0, x1, y0, y1, t0, t1, b, largeStepProbability, initialSample,
                doDirectSeparately, maxConsecutiveRejects, maxDepth, scene, camera, this,
                &nSamplesFinished, filmMutex));
        EnqueueTasks(tasks);
        WaitForAllTasks();
        for (uint32_t i = 0; i < tasks.size(); ++i)
            delete tasks[i];
        progress.Done();
    }
    camera->film->WriteImage();
}