TerrainGenerator::Layer* TerrainModificationHelper::importLayer (const char* filename)
{
    Iff iff;
    if (iff.open (filename, true))
    {
        //-- the layer file stores the shared group data ahead of the layer;
        //-- load and immediately discard each group to advance the iff to the layer itself
        ShaderGroup* shaderGroup = new ShaderGroup;
        shaderGroup->load (iff);
        delete shaderGroup;

        FloraGroup* floraGroup = new FloraGroup;
        floraGroup->load (iff);
        delete floraGroup;

        RadialGroup* radialGroup = new RadialGroup;
        radialGroup->load (iff);
        delete radialGroup;

        EnvironmentGroup* environmentGroup = new EnvironmentGroup;
        environmentGroup->load (iff);
        delete environmentGroup;

        FractalGroup* fractalGroup = new FractalGroup;
        fractalGroup->load (iff);
        delete fractalGroup;

        TerrainGenerator::Layer* const newLayer = new TerrainGenerator::Layer;
        newLayer->load (iff, 0);

        return newLayer;
    }

    return 0;
}
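For illustration, a hedged usage sketch of the importer above. The addLayer call on the destination generator is an assumption about the surrounding editor code, not part of the helper itself:

// Sketch only: import a layer from a file and hand it to an existing
// generator.  TerrainGenerator::addLayer is assumed here; the real attach
// call in the editor may be named differently and have its own ownership rules.
void importLayerInto (TerrainGenerator* const generator, const char* const filename)
{
    TerrainGenerator::Layer* const layer = TerrainModificationHelper::importLayer (filename);

    if (layer)
        generator->addLayer (layer);
    // else: the file could not be opened and importLayer returned 0
}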
void
ShaderInstance::compute_run_lazily (const ShaderGroup &group)
{
    if (shadingsys().m_lazylayers) {
        // lazylayers option turned on: unconditionally run shaders with no
        // outgoing connections ("root" nodes, including the last in the
        // group) or shaders that alter global variables (unless
        // 'lazyglobals' is turned on).
        if (shadingsys().m_lazyglobals) {
            if (group[group.nlayers()-1] == this)
                run_lazily (false);  // force the last layer in the group to run
            else
                run_lazily ((outgoing_connections() && ! renderer_outputs())
                            || empty_instance() || merged_unused());
        }
        else
            run_lazily (outgoing_connections() && ! writes_globals()
                        && ! renderer_outputs());
#if 0
        // Suggested warning below... but are there use cases where people
        // want these to run (because they will extract the results they
        // want from output params)?
        if (! outgoing_connections() && ! empty_instance() &&
                ! writes_globals() && ! renderer_outputs())
            shadingsys().warning ("Layer \"%s\" (shader %s) will run even though it appears to have no used results",
                                  layername(), shadername());
#endif
    } else {
        // lazylayers option turned off: never run lazily
        run_lazily (false);
    }
}
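For reference, the two options consulted above are ordinary ShadingSystem attributes. A minimal sketch of a renderer turning them on before any group is optimized (the attribute names are the standard OSL ones; a real renderer would drive these from user options):

#include <OSL/oslexec.h>

// Sketch: enable lazy layer evaluation, and additionally allow layers whose
// only side effect is writing globals to be deferred as well.
void configure_lazy_evaluation (OSL::ShadingSystem &shadingsys)
{
    shadingsys.attribute ("lazylayers", 1);
    shadingsys.attribute ("lazyglobals", 1);
}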
ShaderGroup* ShaderGroupManager::FindShaderGroupByName(const char* name)
{
    assert( name );

    for( CPPListNode* pNode = m_ShaderGroupList.GetHead(); pNode; pNode = pNode->GetNext() )
    {
        ShaderGroup* pShaderGroup = (ShaderGroup*)pNode;

        if( strcmp( pShaderGroup->GetName(), name ) == 0 )
        {
            return pShaderGroup;
        }
    }

    return 0;
}
bool
ShadingContext::execute (ShaderUse use, ShaderGroup &sgroup,
                         ShaderGlobals &ssg, bool run)
{
    DASSERT (use == ShadUseSurface);  // FIXME
    m_curuse = use;
    m_attribs = &sgroup;

    // Optimize if we haven't already
    if (sgroup.nlayers()) {
        sgroup.start_running ();
        if (! sgroup.optimized()) {
            shadingsys().optimize_group (sgroup);
            if (shadingsys().m_greedyjit && shadingsys().m_groups_to_compile_count) {
                // If we are greedily JITing, optimize/JIT everything now
                shadingsys().optimize_all_groups ();
            }
        }
        if (sgroup.does_nothing())
            return false;
    } else {
       // empty shader - nothing to do!
       return false;
    }

    // Allocate enough space on the heap
    size_t heap_size_needed = sgroup.llvm_groupdata_size();
    if (heap_size_needed > m_heap.size()) {
        if (shadingsys().debug())
            shadingsys().info ("  ShadingContext %p growing heap to %llu",
                               this, (unsigned long long) heap_size_needed);
        m_heap.resize (heap_size_needed);
    }
    // Zero out the heap memory we will be using
    if (shadingsys().m_clearmemory)
        memset (&m_heap[0], 0, heap_size_needed);

    // Set up closure storage
    m_closure_pool.clear();

    // Clear the message blackboard
    m_messages.clear ();

    // Clear miscellaneous scratch space
    m_scratch_pool.clear ();

    if (run) {
        ssg.context = this;
        ssg.Ci = NULL;
        RunLLVMGroupFunc run_func = sgroup.llvm_compiled_version();
        DASSERT (run_func);
        DASSERT (sgroup.llvm_groupdata_size() <= m_heap.size());
        run_func (&ssg, &m_heap[0]);
    }
    return true;
}
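For context, a sketch of how a renderer typically reaches this entry point through the public ShadingSystem API. Thread-info and context management follow the usual OSL pattern; exact signatures (pointer vs. reference context) vary a little between releases, and the caller is assumed to have built the group and filled the globals already:

#include <OSL/oslexec.h>

// Sketch: drive one shading evaluation through the public API, using the
// older pointer-based context overloads that match the code above.
void shade_one_point (OSL::ShadingSystem &shadingsys,
                      OSL::ShaderGroup &group,
                      OSL::ShaderGlobals &sg)
{
    OSL::PerThreadInfo *thread_info = shadingsys.create_thread_info ();
    OSL::ShadingContext *ctx = shadingsys.get_context (thread_info);

    // Ends up in ShadingContext::execute above: lazily optimize/JIT the
    // group, size and clear the heap, then call the compiled group function.
    shadingsys.execute (ctx, group, sg);

    shadingsys.release_context (ctx);
    shadingsys.destroy_thread_info (thread_info);
}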
void OSLShaderGroupExec::execute_shading(
    const ShaderGroup&          shader_group,
    const ShadingPoint&         shading_point) const
{
    assert(m_osl_shading_context);
    assert(m_osl_thread_info);

    m_osl_shading_system.execute(
        *m_osl_shading_context,
        *shader_group.shadergroup_ref(),
        shading_point.get_osl_shader_globals());
}
void OSLShaderGroupExec::do_execute(
    const ShaderGroup&              shader_group,
    const ShadingPoint&             shading_point,
    const VisibilityFlags::Type     ray_flags) const
{
    assert(m_osl_shading_context);
    assert(m_osl_thread_info);

    shading_point.initialize_osl_shader_globals(
        shader_group,
        ray_flags,
        m_osl_shading_system.renderer());

    m_osl_shading_system.execute(
        m_osl_shading_context,
        *reinterpret_cast<OSL::ShaderGroup*>(shader_group.osl_shader_group()),
        shading_point.get_osl_shader_globals());
}
Color3f OSLShaderGroupExec::execute_background(
    const ShaderGroup&              shader_group,
    const Vector3f&                 outgoing) const
{
    assert(m_osl_shading_context);
    assert(m_osl_thread_info);

    OSL::ShaderGlobals sg;
    memset(&sg, 0, sizeof(OSL::ShaderGlobals));
    sg.I = outgoing;
    sg.renderer = m_osl_shading_system.renderer();
    sg.raytype = VisibilityFlags::CameraRay;

    m_osl_shading_system.execute(
        m_osl_shading_context,
        *reinterpret_cast<OSL::ShaderGroup*>(shader_group.osl_shader_group()),
        sg);

    return process_background_tree(sg.Ci);
}
void OSLShaderGroupExec::execute_transparency(
    const ShaderGroup&  shader_group,
    const ShadingPoint& shading_point,
    Alpha&              alpha,
    float*              holdout) const
{
    // Temporarily switch the ray type to Shadow.
    ShadingRay::TypeType saved_type = shading_point.m_ray.m_type;
    shading_point.m_ray.m_type = ShadingRay::ShadowRay;

    m_osl_shading_system.execute(
        *m_osl_shading_context,
        *shader_group.shadergroup_ref(),
        shading_point.get_osl_shader_globals());

    process_transparency_tree(shading_point.get_osl_shader_globals().Ci, alpha);

    if (holdout)
        *holdout = process_holdout_tree(shading_point.get_osl_shader_globals().Ci);

    // Restore the original ray type.
    shading_point.m_ray.m_type = saved_type;
}
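process_background_tree, process_transparency_tree and process_holdout_tree all start from the Ci closure tree that group execution leaves in the shader globals. A minimal sketch of such a walk, written against the ClosureColor/ClosureMul/ClosureAdd/ClosureComponent layout of recent OSL releases (older releases expose the same tree through slightly different fields):

#include <OSL/oslclosure.h>

// Sketch: accumulate the total weight of every closure component in Ci.
// A real implementation would dispatch on comp->id to handle each closure type.
static OSL::Color3 sum_closure_weights (const OSL::ClosureColor *closure,
                                        const OSL::Color3 &weight = OSL::Color3 (1.0f))
{
    if (closure == nullptr)
        return OSL::Color3 (0.0f);

    switch (closure->id)
    {
      case OSL::ClosureColor::MUL:
        return sum_closure_weights (closure->as_mul()->closure,
                                    weight * closure->as_mul()->weight);

      case OSL::ClosureColor::ADD:
        return sum_closure_weights (closure->as_add()->closureA, weight)
             + sum_closure_weights (closure->as_add()->closureB, weight);

      default:  // a leaf component
        return weight * closure->as_comp()->w;
    }
}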
void OSLShaderGroupExec::execute_bump(
    const ShaderGroup&              shader_group,
    const ShadingPoint&             shading_point,
    const Vector2f&                 s) const
{
    // Choose between BSSRDF and BSDF.
    if (shader_group.has_subsurface() && s[0] < 0.5f)
    {
        do_execute(
            shader_group,
            shading_point,
            VisibilityFlags::SubsurfaceRay);

        CompositeSubsurfaceClosure c(
            Basis3f(shading_point.get_shading_basis()),
            shading_point.get_osl_shader_globals().Ci,
            m_arena);

        // Pick a shading basis from one of the BSSRDF closures.
        if (c.get_closure_count() > 0)
        {
            const size_t index = c.choose_closure(s[1]);
            shading_point.set_shading_basis(
                Basis3d(c.get_closure_shading_basis(index)));
        }
    }
    else
    {
        do_execute(
            shader_group,
            shading_point,
            VisibilityFlags::CameraRay);

        choose_bsdf_closure_shading_basis(shading_point, s);
    }
}
bool
ShadingContext::execute (ShaderGroup &sgroup, ShaderGlobals &ssg, bool run)
{
    m_attribs = &sgroup;

    // Optimize if we haven't already
    if (sgroup.nlayers()) {
        sgroup.start_running ();
        if (! sgroup.optimized()) {
            shadingsys().optimize_group (sgroup);
            if (shadingsys().m_greedyjit && shadingsys().m_groups_to_compile_count) {
                // If we are greedily JITing, optimize/JIT everything now
                shadingsys().optimize_all_groups ();
            }
        }
        if (sgroup.does_nothing())
            return false;
    } else {
       // empty shader - nothing to do!
       return false;
    }

    int profile = shadingsys().m_profile;
    OIIO::Timer timer (profile);

    // Allocate enough space on the heap
    size_t heap_size_needed = sgroup.llvm_groupdata_size();
    if (heap_size_needed > m_heap.size()) {
        if (shadingsys().debug())
            info ("  ShadingContext %p growing heap to %llu",
                  this, (unsigned long long) heap_size_needed);
        m_heap.resize (heap_size_needed);
    }
    // Zero out the heap memory we will be using
    if (shadingsys().m_clearmemory)
        memset (&m_heap[0], 0, heap_size_needed);

    // Set up closure storage
    m_closure_pool.clear();

    // Clear the message blackboard
    m_messages.clear ();

    // Clear miscellaneous scratch space
    m_scratch_pool.clear ();

    if (run) {
        ssg.context = this;
        ssg.renderer = renderer();
        ssg.Ci = NULL;
        RunLLVMGroupFunc run_func = sgroup.llvm_compiled_version();
        DASSERT (run_func);
        DASSERT (sgroup.llvm_groupdata_size() <= m_heap.size());
        run_func (&ssg, &m_heap[0]);
    }

    // Process any queued up error messages, warnings, printfs from shaders
    process_errors ();

    if (profile) {
        long long ticks = timer.ticks();
        shadingsys().m_stat_total_shading_time_ticks += ticks;
        sgroup.m_stat_total_shading_time_ticks += ticks;
    }

    return true;
}
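Finally, the profiling branch at the end keys off the standard "profile" attribute of the shading system. A sketch of enabling it and reporting the accumulated timings once rendering is done (getstats is the regular OSL statistics call; the output format is up to the library):

#include <iostream>
#include <OSL/oslexec.h>

// Sketch: enable per-group shading-time accounting before rendering,
// then dump the accumulated statistics afterwards.
void report_shading_time (OSL::ShadingSystem &shadingsys)
{
    shadingsys.attribute ("profile", 1);   // makes ShadingContext::execute time each run

    // ... render ...

    std::cout << shadingsys.getstats (5 /* verbosity */) << std::endl;
}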