// Compute the memory layout of a concrete datatype: isbits fields are stored
// inline at their declared alignment, all other fields are stored as pointers
// to boxed values.
void jl_compute_field_offsets(jl_datatype_t *st)
{
    size_t sz = 0, alignm = 0;
    int ptrfree = 1;

    for(size_t i=0; i < jl_tuple_len(st->types); i++) {
        jl_value_t *ty = jl_tupleref(st->types, i);
        size_t fsz, al;
        if (jl_isbits(ty) && (al=((jl_datatype_t*)ty)->alignment)!=0) {
            fsz = jl_datatype_size(ty);
            st->fields[i].isptr = 0;
        }
        else {
            fsz = sizeof(void*);
            al = fsz;
            st->fields[i].isptr = 1;
            ptrfree = 0;
        }
        sz = LLT_ALIGN(sz, al);
        if (al > alignm)
            alignm = al;
        st->fields[i].offset = sz;
        st->fields[i].size = fsz;
        sz += fsz;
    }
    st->alignment = alignm;
    st->size = LLT_ALIGN(sz, alignm);
    st->pointerfree = ptrfree && !st->abstract;
}
void jl_compute_field_offsets(jl_datatype_t *st)
{
    size_t sz = 0, alignm = 1;
    int ptrfree = 1;

    for(size_t i=0; i < jl_datatype_nfields(st); i++) {
        jl_value_t *ty = jl_field_type(st, i);
        size_t fsz, al;
        if (jl_isbits(ty) && jl_is_leaf_type(ty)) {
            fsz = jl_datatype_size(ty);
            al = ((jl_datatype_t*)ty)->alignment;
            st->fields[i].isptr = 0;
        }
        else {
            fsz = sizeof(void*);
            if (fsz > MAX_ALIGN)
                fsz = MAX_ALIGN;
            al = fsz;
            st->fields[i].isptr = 1;
            ptrfree = 0;
        }
        if (al != 0) {
            sz = LLT_ALIGN(sz, al);
            if (al > alignm)
                alignm = al;
        }
        st->fields[i].offset = sz;
        st->fields[i].size = fsz;
        sz += fsz;
    }
    st->alignment = alignm;
    st->size = LLT_ALIGN(sz, alignm);
    st->pointerfree = ptrfree && !st->abstract;
}
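// A minimal standalone sketch of the rounding logic used above, for
// illustration only. It assumes LLT_ALIGN is the usual round-up-to-a-
// power-of-two-multiple macro (the real definition lives elsewhere in the
// Julia sources); the field sizes and alignments below are hypothetical, not
// taken from any real jl_datatype_t.
#include <stdio.h>
#include <stddef.h>

#define DEMO_ALIGN(x, sz) (((x) + (sz) - 1) & ~((size_t)(sz) - 1))

int main(void)
{
    // e.g. a struct with an Int8 field followed by an Int64 field
    size_t fsz[] = {1, 8}, fal[] = {1, 8};
    size_t sz = 0, alignm = 1;
    for (size_t i = 0; i < 2; i++) {
        sz = DEMO_ALIGN(sz, fal[i]);          // pad up to the field's alignment
        if (fal[i] > alignm) alignm = fal[i]; // struct alignment = max field alignment
        printf("field %zu: offset %zu, size %zu\n", i, sz, fsz[i]);
        sz += fsz[i];
    }
    // total object size is rounded up to the struct alignment, here 16 bytes
    printf("total size %zu (alignment %zu)\n", DEMO_ALIGN(sz, alignm), alignm);
    return 0;
}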
// Map a Julia type to the LLVM type used for its unboxed representation.
// Non-leaf types and types that are not isbits fall back to the boxed
// jl_value_t* representation (jl_pvalue_llvmt).
static Type *julia_type_to_llvm(jl_value_t *jt)
{
    if (jt == (jl_value_t*)jl_bool_type) return T_int1;
    if (jt == (jl_value_t*)jl_float32_type) return T_float32;
    if (jt == (jl_value_t*)jl_float64_type) return T_float64;
    if (jt == (jl_value_t*)jl_bottom_type) return T_void;
    if (!jl_is_leaf_type(jt))
        return jl_pvalue_llvmt;
    if (jl_is_cpointer_type(jt)) {
        Type *lt = julia_type_to_llvm(jl_tparam0(jt));
        if (lt == NULL)
            return NULL;
        if (lt == T_void)
            lt = T_int8;
        return PointerType::get(lt, 0);
    }
    if (jl_is_bitstype(jt)) {
        int nb = jl_datatype_size(jt)*8;
        if (nb == 8)  return T_int8;
        if (nb == 16) return T_int16;
        if (nb == 32) return T_int32;
        if (nb == 64) return T_int64;
        else          return Type::getIntNTy(getGlobalContext(), nb);
    }
    if (jl_isbits(jt)) {
        if (((jl_datatype_t*)jt)->size == 0) {
            // TODO: come up with a representation for a 0-size value,
            // and make this 0 size everywhere. as an argument, simply
            // skip passing it.
            return jl_pvalue_llvmt;
        }
        return julia_struct_to_llvm(jt);
    }
    return jl_pvalue_llvmt;
}
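// A hedged, standalone sketch of the same "bits type of N bytes -> iN"
// mapping, written against the LLVM-C API instead of the C++ API used in the
// codegen snippet above; it is an illustration of the idea, not the codegen
// path itself. Build with: clang demo.c $(llvm-config --cflags --ldflags --libs core)
#include <stdio.h>
#include <llvm-c/Core.h>

static LLVMTypeRef demo_bits_to_llvm(LLVMContextRef ctx, unsigned nbytes)
{
    unsigned nb = nbytes * 8;
    // any bit width is a legal LLVM integer type; 8/16/32/64 are just the
    // common cases special-cased in the snippet above
    return LLVMIntTypeInContext(ctx, nb);
}

int main(void)
{
    LLVMContextRef ctx = LLVMContextCreate();
    for (unsigned nbytes = 1; nbytes <= 5; nbytes++) {
        char *s = LLVMPrintTypeToString(demo_bits_to_llvm(ctx, nbytes));
        printf("%u byte(s) -> %s\n", nbytes, s);
        LLVMDisposeMessage(s);
    }
    LLVMContextDispose(ctx);
    return 0;
}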
void jl_compute_field_offsets(jl_datatype_t *st)
{
    size_t sz = 0, alignm = 1;
    int ptrfree = 1;

    assert(0 <= st->fielddesc_type && st->fielddesc_type <= 2);
    uint64_t max_offset = (((uint64_t)1) << (1 << (3 + st->fielddesc_type))) - 1;
    uint64_t max_size = max_offset >> 1;

    for(size_t i=0; i < jl_datatype_nfields(st); i++) {
        jl_value_t *ty = jl_field_type(st, i);
        size_t fsz, al;
        if (jl_isbits(ty) && jl_is_leaf_type(ty)) {
            fsz = jl_datatype_size(ty);
            // Should never happen
            if (__unlikely(fsz > max_size))
                jl_throw(jl_overflow_exception);
            al = ((jl_datatype_t*)ty)->alignment;
            jl_field_setisptr(st, i, 0);
            if (((jl_datatype_t*)ty)->haspadding)
                st->haspadding = 1;
        }
        else {
            fsz = sizeof(void*);
            if (fsz > MAX_ALIGN)
                fsz = MAX_ALIGN;
            al = fsz;
            jl_field_setisptr(st, i, 1);
            ptrfree = 0;
        }
        if (al != 0) {
            size_t alsz = LLT_ALIGN(sz, al);
            if (sz & (al - 1))
                st->haspadding = 1;
            sz = alsz;
            if (al > alignm)
                alignm = al;
        }
        jl_field_setoffset(st, i, sz);
        jl_field_setsize(st, i, fsz);
        if (__unlikely(max_offset - sz < fsz))
            jl_throw(jl_overflow_exception);
        sz += fsz;
    }
    st->alignment = alignm;
    st->size = LLT_ALIGN(sz, alignm);
    if (st->size > sz)
        st->haspadding = 1;
    st->pointerfree = ptrfree && !st->abstract;
}
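// A quick standalone check of the `sz & (al - 1)` padding test used above:
// for a power-of-two alignment al, the expression is nonzero exactly when sz
// is not already a multiple of al, i.e. when aligning the next field will
// insert padding bytes. A minimal sketch, with a hypothetical alignment of 8.
#include <stdio.h>
#include <stddef.h>

int main(void)
{
    size_t al = 8;
    for (size_t sz = 0; sz <= 9; sz++)
        printf("sz=%zu  needs padding to %zu-byte boundary: %s\n",
               sz, al, (sz & (al - 1)) ? "yes" : "no");
    return 0;
}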
// Store rhs into ptr[idx_0based]. isbits values are unboxed to their native
// representation first; everything else is stored as a boxed pointer.
static Value *typed_store(Value *ptr, Value *idx_0based, Value *rhs,
                          jl_value_t *jltype, jl_codectx_t *ctx)
{
    Type *elty = julia_type_to_llvm(jltype);
    assert(elty != NULL);
    if (elty==T_int1) { elty = T_int8; }
    if (jl_isbits(jltype) && ((jl_datatype_t*)jltype)->size > 0)
        rhs = emit_unbox(elty, PointerType::get(elty,0), rhs);
    else
        rhs = boxed(rhs);
    Value *data = builder.CreateBitCast(ptr, PointerType::get(elty, 0));
    return builder.CreateStore(rhs, builder.CreateGEP(data, idx_0based));
}
// this is used to wrap values for generic contexts, where a
// dynamically-typed value is required (e.g. argument to unknown function).
// if it's already a pointer it's left alone.
static Value *boxed(Value *v, jl_value_t *jt)
{
    Type *t = v->getType();
    if (t == jl_pvalue_llvmt)
        return v;
    if (t == T_void)
        return literal_pointer_val((jl_value_t*)jl_nothing);
    if (t == T_int1)
        return julia_bool(v);
    if (jt == NULL)
        jt = julia_type_of(v);
    jl_datatype_t *jb = (jl_datatype_t*)jt;
    assert(jl_is_datatype(jb));
    if (jb == jl_int8_type)
        return builder.CreateCall(box_int8_func, builder.CreateSExt(v, T_int32));
    if (jb == jl_int16_type) return builder.CreateCall(box_int16_func, v);
    if (jb == jl_int32_type) return builder.CreateCall(box_int32_func, v);
    if (jb == jl_int64_type) return builder.CreateCall(box_int64_func, v);
    if (jb == jl_float32_type) return builder.CreateCall(box_float32_func, v);
    //if (jb == jl_float64_type) return builder.CreateCall(box_float64_func, v);
    if (jb == jl_float64_type) {
        // manually inline alloc & init of Float64 box. cheap, I know.
#ifdef _P64
        Value *newv = builder.CreateCall(jlalloc2w_func);
#else
        Value *newv = builder.CreateCall(jlalloc3w_func);
#endif
        return init_bits_value(newv, literal_pointer_val(jt), t, v);
    }
    if (jb == jl_uint8_type)
        return builder.CreateCall(box_uint8_func, builder.CreateZExt(v, T_int32));
    if (jb == jl_uint16_type) return builder.CreateCall(box_uint16_func, v);
    if (jb == jl_uint32_type) return builder.CreateCall(box_uint32_func, v);
    if (jb == jl_uint64_type) return builder.CreateCall(box_uint64_func, v);
    if (jb == jl_char_type)   return builder.CreateCall(box_char_func, v);
    // TODO: skip the call for constant arguments
    if (!jl_isbits(jt)) {
        assert("Don't know how to box this type" && false);
        return NULL;
    }
    if (!jb->abstract && jb->size == 0) {
        if (jb->instance == NULL)
            jl_new_struct_uninit(jb);
        assert(jb->instance != NULL);
        return literal_pointer_val(jb->instance);
    }
    return allocate_box_dynamic(literal_pointer_val(jt),jl_datatype_size(jt),v);
}
void jl_compute_field_offsets(jl_datatype_t *st)
{
    size_t sz = 0, alignm = 1;
    int ptrfree = 1;

    for(size_t i=0; i < jl_datatype_nfields(st); i++) {
        jl_value_t *ty = jl_field_type(st, i);
        size_t fsz, al;
        if (jl_isbits(ty) && jl_is_leaf_type(ty)) {
            fsz = jl_datatype_size(ty);
            if (__unlikely(fsz > JL_FIELD_MAX_SIZE))
                jl_throw(jl_overflow_exception);
            al = ((jl_datatype_t*)ty)->alignment;
            st->fields[i].isptr = 0;
            if (((jl_datatype_t*)ty)->haspadding)
                st->haspadding = 1;
        }
        else {
            fsz = sizeof(void*);
            if (fsz > MAX_ALIGN)
                fsz = MAX_ALIGN;
            al = fsz;
            st->fields[i].isptr = 1;
            ptrfree = 0;
        }
        if (al != 0) {
            size_t alsz = LLT_ALIGN(sz, al);
            if (alsz > sz)
                st->haspadding = 1;
            sz = alsz;
            if (al > alignm)
                alignm = al;
        }
        if (__unlikely(sz > JL_FIELD_MAX_OFFSET))
            jl_throw(jl_overflow_exception);
        st->fields[i].offset = sz;
        st->fields[i].size = fsz;
        sz += fsz;
    }
    st->alignment = alignm;
    st->size = LLT_ALIGN(sz, alignm);
    st->pointerfree = ptrfree && !st->abstract;
}
// If all components of the union type `ty` are isbits, return the number of
// components and accumulate the maximum component size and alignment into
// *nbytes and *align; return 0 otherwise.
static unsigned union_isbits(jl_value_t *ty, size_t *nbytes, size_t *align)
{
    if (jl_is_uniontype(ty)) {
        unsigned na = union_isbits(((jl_uniontype_t*)ty)->a, nbytes, align);
        if (na == 0)
            return 0;
        unsigned nb = union_isbits(((jl_uniontype_t*)ty)->b, nbytes, align);
        if (nb == 0)
            return 0;
        return na + nb;
    }
    if (jl_isbits(ty)) {
        size_t sz = jl_datatype_size(ty);
        size_t al = jl_datatype_align(ty);
        if (*nbytes < sz)
            *nbytes = sz;
        if (*align < al)
            *align = al;
        return 1;
    }
    return 0;
}
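// A standalone sketch of the recursion above over a toy binary union tree,
// for illustration: a "leaf" carries a (size, alignment) pair standing in for
// an isbits component, and the traversal counts leaves while accumulating the
// maximum size and alignment, much as union_isbits does for Union{A,B,...}.
// The demo_node type and the example values are hypothetical, not Julia APIs.
#include <stdio.h>
#include <stddef.h>

typedef struct demo_node {
    struct demo_node *a, *b; // both non-NULL: a union of two components
    size_t size, align;      // leaf: stand-in for an isbits size/alignment
} demo_node;

static unsigned demo_union_isbits(const demo_node *t, size_t *nbytes, size_t *align)
{
    if (t->a && t->b) {
        unsigned na = demo_union_isbits(t->a, nbytes, align);
        if (na == 0) return 0;
        unsigned nb = demo_union_isbits(t->b, nbytes, align);
        if (nb == 0) return 0;
        return na + nb;
    }
    if (*nbytes < t->size) *nbytes = t->size;
    if (*align < t->align) *align = t->align;
    return 1;
}

int main(void)
{
    // roughly Union{Int8, Union{Int32, Float64}}
    demo_node i8 = {0, 0, 1, 1}, i32 = {0, 0, 4, 4}, f64 = {0, 0, 8, 8};
    demo_node inner = {&i32, &f64, 0, 0}, outer = {&i8, &inner, 0, 0};
    size_t nbytes = 0, align = 0;
    unsigned count = demo_union_isbits(&outer, &nbytes, &align);
    printf("%u components, payload %zu byte(s), alignment %zu\n", count, nbytes, align);
    return 0;
}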
void jl_compute_field_offsets(jl_datatype_t *st)
{
    size_t sz = 0, alignm = 1;
    int ptrfree = 1;
    int homogeneous = 1;
    jl_value_t *lastty = NULL;

    assert(0 <= st->fielddesc_type && st->fielddesc_type <= 2);
    uint64_t max_offset = (((uint64_t)1) << (1 << (3 + st->fielddesc_type))) - 1;
    uint64_t max_size = max_offset >> 1;

    for(size_t i=0; i < jl_datatype_nfields(st); i++) {
        jl_value_t *ty = jl_field_type(st, i);
        size_t fsz, al;
        if (jl_isbits(ty) && jl_is_leaf_type(ty)) {
            fsz = jl_datatype_size(ty);
            // Should never happen
            if (__unlikely(fsz > max_size))
                jl_throw(jl_overflow_exception);
            al = ((jl_datatype_t*)ty)->alignment;
            jl_field_setisptr(st, i, 0);
            if (((jl_datatype_t*)ty)->haspadding)
                st->haspadding = 1;
        }
        else {
            fsz = sizeof(void*);
            if (fsz > MAX_ALIGN)
                fsz = MAX_ALIGN;
            al = fsz;
            jl_field_setisptr(st, i, 1);
            ptrfree = 0;
        }
        if (al != 0) {
            size_t alsz = LLT_ALIGN(sz, al);
            if (sz & (al - 1))
                st->haspadding = 1;
            sz = alsz;
            if (al > alignm)
                alignm = al;
        }
        homogeneous &= lastty==NULL || lastty==ty;
        lastty = ty;
        jl_field_setoffset(st, i, sz);
        jl_field_setsize(st, i, fsz);
        if (__unlikely(max_offset - sz < fsz))
            jl_throw(jl_overflow_exception);
        sz += fsz;
    }
    if (homogeneous && lastty!=NULL && jl_is_tuple_type(st)) {
        // Some tuples become LLVM vectors with stronger alignment than what was calculated above.
        unsigned al = jl_special_vector_alignment(jl_datatype_nfields(st), lastty);
        assert(al % alignm == 0);
        if (al)
            alignm = al;
    }
    st->alignment = alignm;
    st->size = LLT_ALIGN(sz, alignm);
    if (st->size > sz)
        st->haspadding = 1;
    st->pointerfree = ptrfree && !st->abstract;
}
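// A heavily hedged, hypothetical stand-in for the kind of value a helper in
// the spirit of jl_special_vector_alignment might return for a homogeneous
// bits tuple: the total byte size rounded up to the next power of two, so
// that e.g. four 4-byte elements land on a 16-byte (SIMD-friendly) boundary.
// This illustrates why the tuple branch above can raise `alignm`; it is not
// the actual Julia implementation.
#include <stdio.h>
#include <stddef.h>

static size_t demo_vector_alignment(size_t nfields, size_t elsize)
{
    size_t total = nfields * elsize, al = 1;
    while (al < total)
        al <<= 1;                  // round up to the next power of two
    return al;
}

int main(void)
{
    printf("4 x 4-byte elements -> %zu-byte alignment\n", demo_vector_alignment(4, 4));
    printf("3 x 8-byte elements -> %zu-byte alignment\n", demo_vector_alignment(3, 8));
    return 0;
}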
void jl_compute_field_offsets(jl_datatype_t *st)
{
    size_t sz = 0, alignm = 1;
    int homogeneous = 1;
    jl_value_t *lastty = NULL;
    uint64_t max_offset = (((uint64_t)1) << 32) - 1;
    uint64_t max_size = max_offset >> 1;

    uint32_t nfields = jl_svec_len(st->types);
    jl_fielddesc32_t* desc = (jl_fielddesc32_t*) alloca(nfields * sizeof(jl_fielddesc32_t));
    int haspadding = 0;
    assert(st->name == jl_tuple_typename ||
           st == jl_sym_type ||
           st == jl_simplevector_type ||
           nfields != 0);

    for (size_t i = 0; i < nfields; i++) {
        jl_value_t *ty = jl_field_type(st, i);
        size_t fsz, al;
        if (jl_isbits(ty) && jl_is_leaf_type(ty) && ((jl_datatype_t*)ty)->layout) {
            fsz = jl_datatype_size(ty);
            // Should never happen
            if (__unlikely(fsz > max_size))
                jl_throw(jl_overflow_exception);
            al = ((jl_datatype_t*)ty)->layout->alignment;
            desc[i].isptr = 0;
            if (((jl_datatype_t*)ty)->layout->haspadding)
                haspadding = 1;
        }
        else {
            fsz = sizeof(void*);
            if (fsz > MAX_ALIGN)
                fsz = MAX_ALIGN;
            al = fsz;
            desc[i].isptr = 1;
        }
        if (al != 0) {
            size_t alsz = LLT_ALIGN(sz, al);
            if (sz & (al - 1))
                haspadding = 1;
            sz = alsz;
            if (al > alignm)
                alignm = al;
        }
        homogeneous &= lastty==NULL || lastty==ty;
        lastty = ty;
        desc[i].offset = sz;
        desc[i].size = fsz;
        if (__unlikely(max_offset - sz < fsz))
            jl_throw(jl_overflow_exception);
        sz += fsz;
    }
    if (homogeneous && lastty!=NULL && jl_is_tuple_type(st)) {
        // Some tuples become LLVM vectors with stronger alignment than what was calculated above.
        unsigned al = jl_special_vector_alignment(nfields, lastty);
        assert(al % alignm == 0);
        if (al)
            alignm = al;
    }
    st->size = LLT_ALIGN(sz, alignm);
    if (st->size > sz)
        haspadding = 1;
    st->layout = jl_get_layout(nfields, alignm, haspadding, desc);
}
// `v` might be pointing to a field inlined in a structure therefore
// `jl_typeof(v)` may not be the same with `vt` and only `vt` should be
// used to determine the type of the value.
// This is necessary to make sure that this function doesn't allocate any
// memory through the Julia GC
static size_t jl_static_show_x_(JL_STREAM *out, jl_value_t *v,
                                jl_datatype_t *vt, struct recur_list *depth)
{
    size_t n = 0;
    if ((uintptr_t)vt < 4096U) {
        n += jl_printf(out, "<?#%p::%p>", (void*)v, (void*)vt);
    }
    else if ((uintptr_t)v < 4096U) {
        n += jl_printf(out, "<?#%p::", (void*)v);
        n += jl_static_show_x(out, (jl_value_t*)vt, depth);
        n += jl_printf(out, ">");
    }
    else if (vt == jl_method_type) {
        jl_method_t *m = (jl_method_t*)v;
        n += jl_static_show_x(out, (jl_value_t*)m->module, depth);
        n += jl_printf(out, ".%s(...)", jl_symbol_name(m->name));
    }
    else if (vt == jl_method_instance_type) {
        jl_method_instance_t *li = (jl_method_instance_t*)v;
        if (jl_is_method(li->def.method)) {
            jl_method_t *m = li->def.method;
            n += jl_static_show_x(out, (jl_value_t*)m->module, depth);
            if (li->specTypes) {
                n += jl_printf(out, ".");
                n += jl_show_svec(out, ((jl_datatype_t*)jl_unwrap_unionall(li->specTypes))->parameters,
                                  jl_symbol_name(m->name), "(", ")");
            }
            else {
                n += jl_printf(out, ".%s(?)", jl_symbol_name(m->name));
            }
        }
        else {
            n += jl_static_show_x(out, (jl_value_t*)li->def.module, depth);
            n += jl_printf(out, ".<toplevel thunk> -> ");
            n += jl_static_show_x(out, li->inferred, depth);
        }
    }
    else if (vt == jl_simplevector_type) {
        n += jl_show_svec(out, (jl_svec_t*)v, "svec", "(", ")");
    }
    else if (vt == jl_datatype_type) {
        jl_datatype_t *dv = (jl_datatype_t*)v;
        jl_sym_t *globname = dv->name->mt != NULL ? dv->name->mt->name : NULL;
        int globfunc = 0;
        if (globname && !strchr(jl_symbol_name(globname), '#') &&
            !strchr(jl_symbol_name(globname), '@') && dv->name->module &&
            jl_binding_resolved_p(dv->name->module, globname)) {
            jl_binding_t *b = jl_get_binding(dv->name->module, globname);
            if (b && jl_typeof(b->value) == v)
                globfunc = 1;
        }
        jl_sym_t *sym = globfunc ? globname : dv->name->name;
        char *sn = jl_symbol_name(sym);
        int hidden = !globfunc && strchr(sn, '#');
        size_t i = 0;
        int quote = 0;
        if (hidden) {
            n += jl_printf(out, "getfield(");
        }
        else if (globfunc) {
            n += jl_printf(out, "typeof(");
        }
        if (dv->name->module != jl_core_module || !jl_module_exports_p(jl_core_module, sym)) {
            n += jl_static_show_x(out, (jl_value_t*)dv->name->module, depth);
            if (!hidden) {
                n += jl_printf(out, ".");
                if (globfunc && !jl_id_start_char(u8_nextchar(sn, &i))) {
                    n += jl_printf(out, ":(");
                    quote = 1;
                }
            }
        }
        if (hidden) {
            n += jl_printf(out, ", Symbol(\"");
            n += jl_printf(out, "%s", sn);
            n += jl_printf(out, "\"))");
        }
        else {
            n += jl_printf(out, "%s", sn);
            if (globfunc) {
                n += jl_printf(out, ")");
                if (quote)
                    n += jl_printf(out, ")");
            }
        }
        if (dv->parameters && (jl_value_t*)dv != dv->name->wrapper &&
            (jl_has_free_typevars(v) || (jl_value_t*)dv != (jl_value_t*)jl_tuple_type)) {
            size_t j, tlen = jl_nparams(dv);
            if (tlen > 0) {
                n += jl_printf(out, "{");
                for (j = 0; j < tlen; j++) {
                    jl_value_t *p = jl_tparam(dv,j);
                    n += jl_static_show_x(out, p, depth);
                    if (j != tlen-1)
                        n += jl_printf(out, ", ");
                }
                n += jl_printf(out, "}");
            }
            else if (dv->name == jl_tuple_typename) {
                n += jl_printf(out, "{}");
            }
        }
    }
    else if (vt == jl_intrinsic_type) {
        int f = *(uint32_t*)jl_data_ptr(v);
        n += jl_printf(out, "#<intrinsic #%d %s>", f, jl_intrinsic_name(f));
    }
    else if (vt == jl_int64_type) {
        n += jl_printf(out, "%" PRId64, *(int64_t*)v);
    }
    else if (vt == jl_int32_type) {
        n += jl_printf(out, "%" PRId32, *(int32_t*)v);
    }
    else if (vt == jl_int16_type) {
        n += jl_printf(out, "%" PRId16, *(int16_t*)v);
    }
    else if (vt == jl_int8_type) {
        n += jl_printf(out, "%" PRId8, *(int8_t*)v);
    }
    else if (vt == jl_uint64_type) {
        n += jl_printf(out, "0x%016" PRIx64, *(uint64_t*)v);
    }
    else if (vt == jl_uint32_type) {
        n += jl_printf(out, "0x%08" PRIx32, *(uint32_t*)v);
    }
    else if (vt == jl_uint16_type) {
        n += jl_printf(out, "0x%04" PRIx16, *(uint16_t*)v);
    }
    else if (vt == jl_uint8_type) {
        n += jl_printf(out, "0x%02" PRIx8, *(uint8_t*)v);
    }
    else if (jl_is_cpointer_type((jl_value_t*)vt)) {
#ifdef _P64
        n += jl_printf(out, "0x%016" PRIx64, *(uint64_t*)v);
#else
        n += jl_printf(out, "0x%08" PRIx32, *(uint32_t*)v);
#endif
    }
    else if (vt == jl_float32_type) {
        n += jl_printf(out, "%gf", *(float*)v);
    }
    else if (vt == jl_float64_type) {
        n += jl_printf(out, "%g", *(double*)v);
    }
    else if (vt == jl_bool_type) {
        n += jl_printf(out, "%s", *(uint8_t*)v ? "true" : "false");
    }
    else if ((jl_value_t*)vt == jl_typeof(jl_nothing)) {
        n += jl_printf(out, "nothing");
    }
    else if (vt == jl_string_type) {
        n += jl_printf(out, "\"");
        jl_uv_puts(out, jl_string_data(v), jl_string_len(v));
        n += jl_string_len(v);
        n += jl_printf(out, "\"");
    }
    else if (v == jl_bottom_type) {
        n += jl_printf(out, "Union{}");
    }
    else if (vt == jl_uniontype_type) {
        n += jl_printf(out, "Union{");
        while (jl_is_uniontype(v)) {
            // tail-recurse on b to flatten the printing of the Union structure in the common case
            n += jl_static_show_x(out, ((jl_uniontype_t*)v)->a, depth);
            n += jl_printf(out, ", ");
            v = ((jl_uniontype_t*)v)->b;
        }
        n += jl_static_show_x(out, v, depth);
        n += jl_printf(out, "}");
    }
    else if (vt == jl_unionall_type) {
        jl_unionall_t *ua = (jl_unionall_t*)v;
        n += jl_static_show_x(out, ua->body, depth);
        n += jl_printf(out, " where ");
        n += jl_static_show_x(out, (jl_value_t*)ua->var, depth->prev);
    }
    else if (vt == jl_tvar_type) {
        // show type-var bounds only if they aren't going to be printed by UnionAll later
        jl_tvar_t *var = (jl_tvar_t*)v;
        struct recur_list *p;
        int showbounds = 1;
        for (p = depth; p != NULL; p = p->prev) {
            if (jl_is_unionall(p->v) && ((jl_unionall_t*)p->v)->var == var) {
                showbounds = 0;
                break;
            }
        }
        jl_value_t *lb = var->lb, *ub = var->ub;
        if (showbounds && lb != jl_bottom_type) {
            // show type-var lower bound if it is defined
            int ua = jl_is_unionall(lb);
            if (ua)
                n += jl_printf(out, "(");
            n += jl_static_show_x(out, lb, depth);
            if (ua)
                n += jl_printf(out, ")");
            n += jl_printf(out, "<:");
        }
        n += jl_printf(out, "%s", jl_symbol_name(var->name));
        if (showbounds && (ub != (jl_value_t*)jl_any_type || lb != jl_bottom_type)) {
            // show type-var upper bound if it is defined, or if we showed the lower bound
            int ua = jl_is_unionall(ub);
            n += jl_printf(out, "<:");
            if (ua)
                n += jl_printf(out, "(");
            n += jl_static_show_x(out, ub, depth);
            if (ua)
                n += jl_printf(out, ")");
        }
    }
    else if (vt == jl_module_type) {
        jl_module_t *m = (jl_module_t*)v;
        if (m->parent != m && m->parent != jl_main_module) {
            n += jl_static_show_x(out, (jl_value_t*)m->parent, depth);
            n += jl_printf(out, ".");
        }
        n += jl_printf(out, "%s", jl_symbol_name(m->name));
    }
    else if (vt == jl_sym_type) {
        char *sn = jl_symbol_name((jl_sym_t*)v);
        int quoted = !jl_is_identifier(sn) && jl_operator_precedence(sn) == 0;
        if (quoted)
            n += jl_printf(out, "Symbol(\"");
        else
            n += jl_printf(out, ":");
        n += jl_printf(out, "%s", sn);
        if (quoted)
            n += jl_printf(out, "\")");
    }
    else if (vt == jl_ssavalue_type) {
        n += jl_printf(out, "SSAValue(%" PRIuPTR ")", (uintptr_t)((jl_ssavalue_t*)v)->id);
    }
    else if (vt == jl_globalref_type) {
        n += jl_static_show_x(out, (jl_value_t*)jl_globalref_mod(v), depth);
        n += jl_printf(out, ".%s", jl_symbol_name(jl_globalref_name(v)));
    }
    else if (vt == jl_labelnode_type) {
        n += jl_printf(out, "%" PRIuPTR ":", jl_labelnode_label(v));
    }
    else if (vt == jl_gotonode_type) {
        n += jl_printf(out, "goto %" PRIuPTR, jl_gotonode_label(v));
    }
    else if (vt == jl_quotenode_type) {
        jl_value_t *qv = *(jl_value_t**)v;
        if (!jl_is_symbol(qv)) {
            n += jl_printf(out, "quote ");
        }
        else {
            n += jl_printf(out, ":(");
        }
        n += jl_static_show_x(out, qv, depth);
        if (!jl_is_symbol(qv)) {
            n += jl_printf(out, " end");
        }
        else {
            n += jl_printf(out, ")");
        }
    }
    else if (vt == jl_newvarnode_type) {
        n += jl_printf(out, "<newvar ");
        n += jl_static_show_x(out, *(jl_value_t**)v, depth);
        n += jl_printf(out, ">");
    }
    else if (vt == jl_linenumbernode_type) {
        n += jl_printf(out, "#= ");
        n += jl_static_show_x(out, jl_linenode_file(v), depth);
        n += jl_printf(out, ":%" PRIuPTR " =#", jl_linenode_line(v));
    }
    else if (vt == jl_expr_type) {
        jl_expr_t *e = (jl_expr_t*)v;
        if (e->head == assign_sym && jl_array_len(e->args) == 2) {
            n += jl_static_show_x(out, jl_exprarg(e,0), depth);
            n += jl_printf(out, " = ");
            n += jl_static_show_x(out, jl_exprarg(e,1), depth);
        }
        else {
            char sep = ' ';
            if (e->head == body_sym)
                sep = '\n';
            n += jl_printf(out, "Expr(:%s", jl_symbol_name(e->head));
            size_t i, len = jl_array_len(e->args);
            for (i = 0; i < len; i++) {
                n += jl_printf(out, ",%c", sep);
                n += jl_static_show_x(out, jl_exprarg(e,i), depth);
            }
            n += jl_printf(out, ")::");
            n += jl_static_show_x(out, e->etype, depth);
        }
    }
    else if (jl_is_array_type(vt)) {
        n += jl_static_show_x(out, (jl_value_t*)vt, depth);
        n += jl_printf(out, "[");
        size_t j, tlen = jl_array_len(v);
        jl_array_t *av = (jl_array_t*)v;
        jl_datatype_t *el_type = (jl_datatype_t*)jl_tparam0(vt);
        int nlsep = 0;
        if (av->flags.ptrarray) {
            // print arrays with newlines, unless the elements are probably small
            for (j = 0; j < tlen; j++) {
                jl_value_t *p = jl_array_ptr_ref(av, j);
                if (p != NULL && (uintptr_t)p >= 4096U) {
                    jl_value_t *p_ty = jl_typeof(p);
                    if ((uintptr_t)p_ty >= 4096U) {
                        if (!jl_isbits(p_ty)) {
                            nlsep = 1;
                            break;
                        }
                    }
                }
            }
        }
        if (nlsep && tlen > 1)
            n += jl_printf(out, "\n ");
        for (j = 0; j < tlen; j++) {
            if (av->flags.ptrarray) {
                n += jl_static_show_x(out, jl_array_ptr_ref(v, j), depth);
            }
            else {
                char *ptr = ((char*)av->data) + j * av->elsize;
                n += jl_static_show_x_(out, (jl_value_t*)ptr, el_type, depth);
            }
            if (j != tlen - 1)
                n += jl_printf(out, nlsep ? ",\n " : ", ");
        }
        n += jl_printf(out, "]");
    }
    else if (vt == jl_loaderror_type) {
        n += jl_printf(out, "LoadError(at ");
        n += jl_static_show_x(out, *(jl_value_t**)v, depth);
        // Access the field directly to avoid allocation
        n += jl_printf(out, " line %" PRIdPTR, ((intptr_t*)v)[1]);
        n += jl_printf(out, ": ");
        n += jl_static_show_x(out, ((jl_value_t**)v)[2], depth);
        n += jl_printf(out, ")");
    }
    else if (vt == jl_errorexception_type) {
        n += jl_printf(out, "ErrorException(");
        n += jl_static_show_x(out, *(jl_value_t**)v, depth);
        n += jl_printf(out, ")");
    }
    else if (jl_is_datatype(vt)) {
        int istuple = jl_is_tuple_type(vt);
        if (!istuple)
            n += jl_static_show_x(out, (jl_value_t*)vt, depth);
        n += jl_printf(out, "(");
        size_t nb = jl_datatype_size(vt);
        size_t tlen = jl_datatype_nfields(vt);
        if (nb > 0 && tlen == 0) {
            uint8_t *data = (uint8_t*)v;
            n += jl_printf(out, "0x");
            for(int i = nb - 1; i >= 0; --i)
                n += jl_printf(out, "%02" PRIx8, data[i]);
        }
        else {
            size_t i = 0;
            if (vt == jl_typemap_entry_type)
                i = 1;
            for (; i < tlen; i++) {
                if (!istuple) {
                    n += jl_printf(out, "%s", jl_symbol_name(jl_field_name(vt, i)));
                    n += jl_printf(out, "=");
                }
                size_t offs = jl_field_offset(vt, i);
                char *fld_ptr = (char*)v + offs;
                if (jl_field_isptr(vt, i)) {
                    n += jl_static_show_x(out, *(jl_value_t**)fld_ptr, depth);
                }
                else {
                    jl_datatype_t *ft = (jl_datatype_t*)jl_field_type(vt, i);
                    if (jl_is_uniontype(ft)) {
                        uint8_t sel = ((uint8_t*)fld_ptr)[jl_field_size(vt, i) - 1];
                        ft = (jl_datatype_t*)jl_nth_union_component((jl_value_t*)ft, sel);
                    }
                    n += jl_static_show_x_(out, (jl_value_t*)fld_ptr, ft, depth);
                }
                if (istuple && tlen == 1)
                    n += jl_printf(out, ",");
                else if (i != tlen - 1)
                    n += jl_printf(out, ", ");
            }
            if (vt == jl_typemap_entry_type) {
                n += jl_printf(out, ", next=↩︎\n ");
                n += jl_static_show_x(out, jl_fieldref(v, 0), depth);
            }
        }
        n += jl_printf(out, ")");
    }
    else {
        n += jl_printf(out, "<?#%p::", (void*)v);
        n += jl_static_show_x(out, (jl_value_t*)vt, depth);
        n += jl_printf(out, ">");
    }
    return n;
}
void jl_compute_field_offsets(jl_datatype_t *st)
{
    size_t sz = 0, alignm = 1;
    int homogeneous = 1;
    jl_value_t *lastty = NULL;
    uint64_t max_offset = (((uint64_t)1) << 32) - 1;
    uint64_t max_size = max_offset >> 1;

    if (st->name->wrapper) {
        // If layout doesn't depend on type parameters, it's stored in st->name->wrapper
        // and reused by all subtypes.
        jl_datatype_t *w = (jl_datatype_t*)jl_unwrap_unionall(st->name->wrapper);
        if (st != w && // this check allows us to re-compute layout for some types during init
            w->layout) {
            st->layout = w->layout;
            st->size = w->size;
            return;
        }
    }
    if (st->types == NULL)
        return;
    uint32_t nfields = jl_svec_len(st->types);
    if (nfields == 0) {
        if (st == jl_sym_type || st == jl_string_type) {
            // opaque layout - heap-allocated blob
            static const jl_datatype_layout_t opaque_byte_layout = {0, 1, 0, 1, 0};
            st->layout = &opaque_byte_layout;
        }
        else if (st == jl_simplevector_type || st->name == jl_array_typename) {
            static const jl_datatype_layout_t opaque_ptr_layout = {0, sizeof(void*), 0, 1, 0};
            st->layout = &opaque_ptr_layout;
        }
        else {
            // reuse the same layout for all singletons
            static const jl_datatype_layout_t singleton_layout = {0, 1, 0, 0, 0};
            st->layout = &singleton_layout;
        }
        return;
    }
    if (!jl_is_leaf_type((jl_value_t*)st)) {
        // compute layout whenever field types have no free variables
        for (size_t i = 0; i < nfields; i++) {
            if (jl_has_free_typevars(jl_field_type(st, i)))
                return;
        }
    }

    size_t descsz = nfields * sizeof(jl_fielddesc32_t);
    jl_fielddesc32_t *desc;
    if (descsz < jl_page_size)
        desc = (jl_fielddesc32_t*)alloca(descsz);
    else
        desc = (jl_fielddesc32_t*)malloc(descsz);
    int haspadding = 0;
    assert(st->name == jl_tuple_typename ||
           st == jl_sym_type ||
           st == jl_simplevector_type ||
           nfields != 0);

    for (size_t i = 0; i < nfields; i++) {
        jl_value_t *ty = jl_field_type(st, i);
        size_t fsz, al;
        if (jl_isbits(ty) && jl_is_leaf_type(ty) && ((jl_datatype_t*)ty)->layout) {
            fsz = jl_datatype_size(ty);
            // Should never happen
            if (__unlikely(fsz > max_size))
                goto throw_ovf;
            al = jl_datatype_align(ty);
            desc[i].isptr = 0;
            if (((jl_datatype_t*)ty)->layout->haspadding)
                haspadding = 1;
        }
        else {
            fsz = sizeof(void*);
            if (fsz > MAX_ALIGN)
                fsz = MAX_ALIGN;
            al = fsz;
            desc[i].isptr = 1;
        }
        assert(al <= JL_HEAP_ALIGNMENT && (JL_HEAP_ALIGNMENT % al) == 0);
        if (al != 0) {
            size_t alsz = LLT_ALIGN(sz, al);
            if (sz & (al - 1))
                haspadding = 1;
            sz = alsz;
            if (al > alignm)
                alignm = al;
        }
        homogeneous &= lastty==NULL || lastty==ty;
        lastty = ty;
        desc[i].offset = sz;
        desc[i].size = fsz;
        if (__unlikely(max_offset - sz < fsz))
            goto throw_ovf;
        sz += fsz;
    }
    if (homogeneous && lastty!=NULL && jl_is_tuple_type(st)) {
        // Some tuples become LLVM vectors with stronger alignment than what was calculated above.
        unsigned al = jl_special_vector_alignment(nfields, lastty);
        assert(al % alignm == 0);
        // JL_HEAP_ALIGNMENT is the biggest alignment we can guarantee on the heap.
        if (al > JL_HEAP_ALIGNMENT)
            alignm = JL_HEAP_ALIGNMENT;
        else if (al)
            alignm = al;
    }
    st->size = LLT_ALIGN(sz, alignm);
    if (st->size > sz)
        haspadding = 1;
    st->layout = jl_get_layout(nfields, alignm, haspadding, desc);
    if (descsz >= jl_page_size)
        free(desc);
    return;
 throw_ovf:
    if (descsz >= jl_page_size)
        free(desc);
    jl_throw(jl_overflow_exception);
}
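// A small standalone sketch of the scratch-buffer pattern used above:
// stack-allocate the temporary descriptor array when it is small, fall back
// to malloc for large ones, and release the heap buffer on both the success
// and error paths. The 4096-byte threshold stands in for jl_page_size and is
// an assumption for illustration; <alloca.h> is the glibc header, other
// platforms may provide alloca elsewhere.
#include <stdio.h>
#include <stdlib.h>
#include <alloca.h>

static int demo_process(size_t nitems)
{
    size_t bufsz = nitems * sizeof(int);
    int heap = (bufsz >= 4096);
    int *buf = heap ? (int*)malloc(bufsz) : (int*)alloca(bufsz);
    if (heap && buf == NULL)
        return -1;                    // allocation failure: nothing to free
    for (size_t i = 0; i < nitems; i++)
        buf[i] = (int)i;              // fill the scratch buffer
    int result = buf[nitems - 1];     // use it before it goes out of scope
    if (heap)
        free(buf);                    // alloca memory is reclaimed automatically
    return result;
}

int main(void)
{
    printf("small: %d, large: %d\n", demo_process(16), demo_process(100000));
    return 0;
}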