TypeSpec
OSLCompilerImpl::type_from_code (const char *code, int *advance)
{
    TypeSpec t;
    int i = 0;
    switch (code[i]) {
    case 'i' : t = TypeDesc::TypeInt;             break;
    case 'f' : t = TypeDesc::TypeFloat;           break;
    case 'c' : t = TypeDesc::TypeColor;           break;
    case 'p' : t = TypeDesc::TypePoint;           break;
    case 'v' : t = TypeDesc::TypeVector;          break;
    case 'n' : t = TypeDesc::TypeNormal;          break;
    case 'm' : t = TypeDesc::TypeMatrix;          break;
    case 's' : t = TypeDesc::TypeString;          break;
    case 'x' : t = TypeDesc (TypeDesc::NONE);     break;
    case 'X' : t = TypeDesc (TypeDesc::PTR);      break;
    case 'L' : t = TypeDesc (TypeDesc::LONGLONG); break;
    case 'C' : // color closure
        t = TypeSpec (TypeDesc::TypeColor, true);
        break;
    case 'S' : // structure
        // Following the 'S' is the numeric structure ID
        t = TypeSpec ("struct", atoi (code+i+1));
        // Skip to the last digit
        while (isdigit(code[i+1]))
            ++i;
        break;
    case '?' : break;  // anything will match, so keep 'UNKNOWN'
    case '*' : break;  // anything will match, so keep 'UNKNOWN'
    case '.' : break;  // anything will match, so keep 'UNKNOWN'
    default:
        std::cerr << "Don't know how to decode type code '"
                  << code << "' " << (int)code[0] << "\n";
        ASSERT (0);   // FIXME
        if (advance)
            *advance = 1;
        return TypeSpec();
    }
    ++i;

    if (code[i] == '[') {
        ++i;
        t.make_array (-1);   // signal arrayness, unknown length
        if (isdigit(code[i]) || code[i] == ']') {
            if (isdigit(code[i]))
                t.make_array (atoi (code+i));
            while (isdigit(code[i]))
                ++i;
            if (code[i] == ']')
                ++i;
        }
    }

    if (advance)
        *advance = i;
    return t;
}
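// ---------------------------------------------------------------------------
// Hedged usage sketch (not part of the original source): walking an
// argument-code string and decoding each entry with type_from_code().
// The function name, the 'comp' pointer, and the code string "xf[2]C" are
// hypothetical; 'advance' reports how many characters the decoder consumed
// for each entry, so the loop steps through "x" (no type), "f[2]" (float[2]),
// and "C" (a color closure) in turn.
static void
decode_arg_codes_sketch (OSLCompilerImpl *comp)
{
    const char *codes = "xf[2]C";   // hypothetical argument-code string
    while (*codes) {
        int advance = 0;
        TypeSpec t = comp->type_from_code (codes, &advance);
        (void)t;                    // a real caller would use the type here
        codes += advance;           // step past the characters just decoded
    }
}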
/// structnode is an AST node representing a struct.  It could be a
/// struct variable, or a field of a struct (which is itself a struct),
/// or an array element of a struct.  Whatever it is, here we figure out
/// some vital information about it: the name of the symbol representing
/// the struct, and its type.
void
ASTstructselect::find_structsym (ASTNode *structnode, ustring &structname,
                                 TypeSpec &structtype)
{
    // This node selects a field from a struct.  The purpose of this
    // method is to "flatten" the possibly-nested (struct in struct,
    // and/or array of structs) down to a symbol that represents the
    // particular field.  In the process, we set structname and its
    // type structtype.
    ASSERT (structnode->typespec().is_structure() ||
            structnode->typespec().is_structure_array());
    if (structnode->nodetype() == variable_ref_node) {
        // The structnode is a top-level struct variable
        ASTvariable_ref *var = (ASTvariable_ref *) structnode;
        structname = var->name();
        structtype = var->typespec();
    }
    else if (structnode->nodetype() == structselect_node) {
        // The structnode is itself a field of another struct.
        ASTstructselect *thestruct = (ASTstructselect *) structnode;
        int structid, fieldid;
        Symbol *sym = thestruct->find_fieldsym (structid, fieldid);
        structname = sym->name();
        structtype = sym->typespec();
    }
    else if (structnode->nodetype() == index_node) {
        // The structnode is an element of an array of structs:
        ASTindex *arrayref = (ASTindex *) structnode;
        find_structsym (arrayref->lvalue().get(), structname, structtype);
        structtype.make_array (0);   // clear its arrayness
    }
    else {
        ASSERT (0 && "Malformed ASTstructselect");
    }
}
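// ---------------------------------------------------------------------------
// Hedged illustration (not from the original source) of which branch of
// find_structsym() fires for a few hypothetical OSL expressions, assuming
// shader-side declarations like:
//
//     struct Inner { float f; };
//     struct Outer { Inner in; };
//     Outer o;
//     Outer arr[4];
//
//   o.in        -> structnode is 'o':       variable_ref_node branch
//                                           (top-level struct variable)
//   o.in.f      -> structnode is 'o.in':    structselect_node branch
//                                           (the struct is itself a field)
//   arr[2].in   -> structnode is 'arr[2]':  index_node branch
//                                           (element of an array of structs;
//                                            the recursion strips arrayness)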
llvm::Type *
BackendLLVM::llvm_type_groupdata ()
{
    // If already computed, return it
    if (m_llvm_type_groupdata)
        return m_llvm_type_groupdata;

    std::vector<llvm::Type*> fields;

    // First, add the array that tells if each layer has run.  But only make
    // slots for the layers that may be called/used.
    int sz = (m_num_used_layers + 3) & (~3);   // Round up to 32 bit boundary
    fields.push_back (ll.type_array (ll.type_bool(), sz));
    size_t offset = sz * sizeof(bool);

    // For each layer in the group, add entries for all params that are
    // connected or interpolated, and output params.  Also mark those
    // symbols with their offset within the group struct.
    if (llvm_debug() >= 2)
        std::cout << "Group param struct:\n";
    m_param_order_map.clear ();
    int order = 1;
    for (int layer = 0;  layer < group().nlayers();  ++layer) {
        ShaderInstance *inst = group()[layer];
        if (inst->unused())
            continue;
        FOREACH_PARAM (Symbol &sym, inst) {
            TypeSpec ts = sym.typespec();
            if (ts.is_structure())   // skip the struct symbol itself
                continue;
            int arraylen = std::max (1, sym.typespec().arraylength());
            int deriv_mult = sym.has_derivs() ? 3 : 1;
            int n = arraylen * deriv_mult;
            ts.make_array (n);
            fields.push_back (llvm_type (ts));

            // Alignment
            size_t align = sym.typespec().is_closure_based() ? sizeof(void*) :
                    sym.typespec().simpletype().basesize();
            if (offset & (align-1))
                offset += align - (offset & (align-1));
            if (llvm_debug() >= 2)
                std::cout << " " << inst->layername()
                          << " (" << inst->id() << ") " << sym.mangled()
                          << " " << ts.c_str() << ", field " << order
                          << ", offset " << offset << std::endl;
            sym.dataoffset ((int)offset);
            offset += int(sym.size()) * deriv_mult;

            m_param_order_map[&sym] = order;
            ++order;
        }
    }
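// ---------------------------------------------------------------------------
// Hedged sketch (not part of the original source) of the two rounding idioms
// used above, assuming 'align' is a power of two:
//   (n + 3) & ~3   rounds a count up to the next multiple of 4 (e.g. 5 used
//                  layers -> 8 one-byte bool slots, keeping the flag array a
//                  multiple of 32 bits);
//   the 'offset & (align-1)' test pads the running offset up to the next
//   multiple of align only when it is not already aligned.
static size_t
align_offset_sketch (size_t offset, size_t align)
{
    if (offset & (align - 1))                       // not already aligned?
        offset += align - (offset & (align - 1));   // pad up to the boundary
    return offset;   // e.g. align_offset_sketch(13, 8) == 16
}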
llvm::Type *
BackendLLVM::llvm_type_groupdata ()
{
    // If already computed, return it
    if (m_llvm_type_groupdata)
        return m_llvm_type_groupdata;

    std::vector<llvm::Type*> fields;
    int offset = 0;
    int order = 0;

    if (llvm_debug() >= 2)
        std::cout << "Group param struct:\n";

    // First, add the array that tells if each layer has run.  But only make
    // slots for the layers that may be called/used.
    if (llvm_debug() >= 2)
        std::cout << " layers run flags: " << m_num_used_layers
                  << " at offset " << offset << "\n";
    int sz = (m_num_used_layers + 3) & (~3);   // Round up to 32 bit boundary
    fields.push_back (ll.type_array (ll.type_bool(), sz));
    offset += sz * sizeof(bool);
    ++order;

    // Now add the array that tells which userdata have been initialized,
    // and the space for the userdata values.
    int nuserdata = (int) group().m_userdata_names.size();
    if (nuserdata) {
        if (llvm_debug() >= 2)
            std::cout << " userdata initialized flags: " << nuserdata
                      << " at offset " << offset << ", field " << order << "\n";
        ustring *names = & group().m_userdata_names[0];
        TypeDesc *types = & group().m_userdata_types[0];
        int *offsets = & group().m_userdata_offsets[0];
        int sz = (nuserdata + 3) & (~3);
        fields.push_back (ll.type_array (ll.type_bool(), sz));
        offset += nuserdata * sizeof(bool);
        ++order;
        for (int i = 0; i < nuserdata; ++i) {
            TypeDesc type = types[i];
            int n = type.numelements() * 3;   // always make deriv room
            type.arraylen = n;
            fields.push_back (llvm_type (type));
            // Alignment
            int align = type.basesize();
            offset = OIIO::round_to_multiple_of_pow2 (offset, align);
            if (llvm_debug() >= 2)
                std::cout << " userdata " << names[i] << ' ' << type
                          << ", field " << order << ", offset " << offset << "\n";
            offsets[i] = offset;
            offset += int(type.size());
            ++order;
        }
    }

    // For each layer in the group, add entries for all params that are
    // connected or interpolated, and output params.  Also mark those
    // symbols with their offset within the group struct.
    m_param_order_map.clear ();
    for (int layer = 0;  layer < group().nlayers();  ++layer) {
        ShaderInstance *inst = group()[layer];
        if (inst->unused())
            continue;
        FOREACH_PARAM (Symbol &sym, inst) {
            TypeSpec ts = sym.typespec();
            if (ts.is_structure())   // skip the struct symbol itself
                continue;
            const int arraylen  = std::max (1, sym.typespec().arraylength());
            const int derivSize = (sym.has_derivs() ? 3 : 1);
            ts.make_array (arraylen * derivSize);
            fields.push_back (llvm_type (ts));

            // Alignment
            size_t align = sym.typespec().is_closure_based() ? sizeof(void*) :
                    sym.typespec().simpletype().basesize();
            if (offset & (align-1))
                offset += align - (offset & (align-1));
            if (llvm_debug() >= 2)
                std::cout << " " << inst->layername()
                          << " (" << inst->id() << ") " << sym.mangled()
                          << " " << ts.c_str() << ", field " << order
                          << ", size " << derivSize * int(sym.size())
                          << ", offset " << offset << std::endl;
            sym.dataoffset ((int)offset);
            offset += derivSize * int(sym.size());

            m_param_order_map[&sym] = order;
            ++order;
        }
    }
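// ---------------------------------------------------------------------------
// Hedged sketch (not part of the original source) of what the recorded byte
// offsets mean for a consumer of the group data block.  'groupdata' is a
// hypothetical raw pointer to the start of the block whose layout the
// function above describes; the parameter's storage is assumed to begin
// sym.dataoffset() bytes in, with any derivatives laid out as value, dx, dy
// blocks back to back (hence the derivSize * sym.size() stride above).
static void *
param_storage_sketch (char *groupdata, const Symbol &sym)
{
    // e.g. a float param with derivs would occupy 3 * sizeof(float) bytes
    // starting at this address under the layout computed above.
    return groupdata + sym.dataoffset ();
}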