llvm::Type *
BackendLLVM::llvm_type_groupdata ()
{
    // If already computed, return it
    if (m_llvm_type_groupdata)
        return m_llvm_type_groupdata;

    std::vector<llvm::Type*> fields;

    // First, add the array that tells if each layer has run.  But only make
    // slots for the layers that may be called/used.
    int sz = (m_num_used_layers + 3) & (~3);  // Round up to a 32-bit boundary
    fields.push_back (ll.type_array (ll.type_bool(), sz));
    size_t offset = sz * sizeof(bool);

    // For each layer in the group, add entries for all params that are
    // connected or interpolated, and output params.  Also mark those
    // symbols with their offset within the group struct.
    if (llvm_debug() >= 2)
        std::cout << "Group param struct:\n";
    m_param_order_map.clear ();
    int order = 1;
    for (int layer = 0;  layer < group().nlayers();  ++layer) {
        ShaderInstance *inst = group()[layer];
        if (inst->unused())
            continue;
        FOREACH_PARAM (Symbol &sym, inst) {
            TypeSpec ts = sym.typespec();
            if (ts.is_structure())  // skip the struct symbol itself
                continue;
            int arraylen = std::max (1, sym.typespec().arraylength());
            int deriv_mult = sym.has_derivs() ? 3 : 1;
            int n = arraylen * deriv_mult;
            ts.make_array (n);
            fields.push_back (llvm_type (ts));

            // Alignment: round the offset up to the natural alignment of
            // this type (pointer size for closures, otherwise the base
            // size of the underlying element type).
            size_t align = sym.typespec().is_closure_based() ? sizeof(void*) :
                    sym.typespec().simpletype().basesize();
            if (offset & (align-1))
                offset += align - (offset & (align-1));
            if (llvm_debug() >= 2)
                std::cout << "  " << inst->layername() 
                          << " (" << inst->id() << ") " << sym.mangled()
                          << " " << ts.c_str() << ", field " << order 
                          << ", offset " << offset << std::endl;
            sym.dataoffset ((int)offset);
            offset += int(sym.size()) * deriv_mult;

            m_param_order_map[&sym] = order;
            ++order;
        }
    }

    // (Snippet truncated here.)  The full source finishes by building the
    // LLVM struct type from 'fields', caching it, and returning it.
    // A minimal sketch, with a simplified struct name:
    m_llvm_type_groupdata = ll.type_struct (fields, "Groupdata");
    return m_llvm_type_groupdata;
}
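
The manual alignment step in the loop relies on align being a power of two: offset & (align-1) measures how far offset sits past the last aligned boundary. A self-contained sketch of the same round-up (align_up is an illustrative name, not from the OSL source):

#include <cstdio>
#include <cstddef>

// Round offset up to the next multiple of align (align must be a power of 2).
static size_t align_up (size_t offset, size_t align)
{
    if (offset & (align-1))
        offset += align - (offset & (align-1));
    return offset;
}

int main ()
{
    std::printf ("%zu\n", align_up (13, 4));   // prints 16
    std::printf ("%zu\n", align_up (16, 8));   // prints 16: already aligned
    return 0;
}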
Example #2
llvm::Type *
BackendLLVM::llvm_type_groupdata ()
{
    // If already computed, return it
    if (m_llvm_type_groupdata)
        return m_llvm_type_groupdata;

    std::vector<llvm::Type*> fields;
    int offset = 0;
    int order = 0;

    if (llvm_debug() >= 2)
        std::cout << "Group param struct:\n";

    // First, add the array that tells if each layer has run.  But only make
    // slots for the layers that may be called/used.
    if (llvm_debug() >= 2)
        std::cout << "  layers run flags: " << m_num_used_layers
                  << " at offset " << offset << "\n";
    int sz = (m_num_used_layers + 3) & (~3);  // Round up to a 32-bit boundary
    fields.push_back (ll.type_array (ll.type_bool(), sz));
    offset += sz * sizeof(bool);
    ++order;

    // Now add the array that tells which userdata have been initialized,
    // and the space for the userdata values.
    int nuserdata = (int) group().m_userdata_names.size();
    if (nuserdata) {
        if (llvm_debug() >= 2)
            std::cout << "  userdata initialized flags: " << nuserdata
                      << " at offset " << offset << ", field " << order << "\n";
        ustring *names = & group().m_userdata_names[0];
        TypeDesc *types = & group().m_userdata_types[0];
        int *offsets = & group().m_userdata_offsets[0];
        int sz = (nuserdata + 3) & (~3);
        fields.push_back (ll.type_array (ll.type_bool(), sz));
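        // (Each flag records whether the corresponding userdata value has
        //  already been looked up during this shade, so the fetch happens
        //  at most once.)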
        offset += nuserdata * sizeof(bool);
        ++order;
        for (int i = 0; i < nuserdata; ++i) {
            TypeDesc type = types[i];
            int n = type.numelements() * 3;   // always make deriv room (value, d/dx, d/dy)
            type.arraylen = n;
            fields.push_back (llvm_type (type));
            // Alignment
            int align = type.basesize();
            offset = OIIO::round_to_multiple_of_pow2 (offset, align);
            if (llvm_debug() >= 2)
                std::cout << "  userdata " << names[i] << ' ' << type
                          << ", field " << order << ", offset " << offset << "\n";
            offsets[i] = offset;
            offset += int(type.size());
            ++order;
        }
    }

    // For each layer in the group, add entries for all params that are
    // connected or interpolated, and output params.  Also mark those
    // symbols with their offset within the group struct.
    m_param_order_map.clear ();
    for (int layer = 0;  layer < group().nlayers();  ++layer) {
        ShaderInstance *inst = group()[layer];
        if (inst->unused())
            continue;
        FOREACH_PARAM (Symbol &sym, inst) {
            TypeSpec ts = sym.typespec();
            if (ts.is_structure())  // skip the struct symbol itself
                continue;
            const int arraylen = std::max (1, sym.typespec().arraylength());
            const int derivSize = (sym.has_derivs() ? 3 : 1);
            ts.make_array (arraylen * derivSize);
            fields.push_back (llvm_type (ts));

            // Alignment: round the offset up to the natural alignment of
            // this type (pointer size for closures, otherwise the base
            // size of the underlying element type).
            size_t align = sym.typespec().is_closure_based() ? sizeof(void*) :
                    sym.typespec().simpletype().basesize();
            if (offset & (align-1))
                offset += align - (offset & (align-1));
            if (llvm_debug() >= 2)
                std::cout << "  " << inst->layername() 
                          << " (" << inst->id() << ") " << sym.mangled()
                          << " " << ts.c_str() << ", field " << order 
                          << ", size " << derivSize * int(sym.size())
                          << ", offset " << offset << std::endl;
            sym.dataoffset ((int)offset);
            offset += derivSize * int(sym.size());

            m_param_order_map[&sym] = order;
            ++order;
        }
    }

    // (Snippet truncated here.)  As in the first example, the full source
    // ends by building the struct type from 'fields', caching it in
    // m_llvm_type_groupdata, and returning it; sketched with a simplified
    // struct name:
    m_llvm_type_groupdata = ll.type_struct (fields, "Groupdata");
    return m_llvm_type_groupdata;
}
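
Both versions compute byte offsets by hand and rely on those offsets matching the layout LLVM later derives for the struct. A self-contained sketch of the same bookkeeping for a hypothetical group (two used layers, one float userdata, one color output parameter with derivatives; all names and numbers are illustrative, not from OSL):

#include <cstdio>

// Round n up to a multiple of 4, as in '(n + 3) & (~3)' above.
static int round4 (int n) { return (n + 3) & (~3); }

int main ()
{
    int offset = 0;

    // Layer-run flags: 2 used layers, padded to 4 bools.
    offset += round4(2) * (int)sizeof(bool);                 // offset = 4

    // Userdata-initialized flags: 1 entry, padded to 4 bools.
    offset += round4(1) * (int)sizeof(bool);                 // offset = 8

    // One float userdata with deriv room: float[3], 4-byte alignment.
    offset = round4(offset);                                 // already aligned
    std::printf ("userdata value at offset %d\n", offset);   // 8
    offset += 3 * (int)sizeof(float);                        // offset = 20

    // One color output param with derivs: 3 floats * 3, 4-byte alignment.
    offset = round4(offset);                                 // already aligned
    std::printf ("output param at offset %d\n", offset);     // 20
    offset += 9 * (int)sizeof(float);                        // offset = 56

    std::printf ("total group data size: %d bytes\n", offset);
    return 0;
}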