// Construct an instance of `type` from `na` positional arguments.
// Each supplied argument is type-checked against the declared field type;
// remaining (unsupplied) fields are initialized to a safe state: pointer
// fields get NULL (#undef), inline isbits-union fields get selector byte 0.
JL_DLLEXPORT jl_value_t *jl_new_structv(jl_datatype_t *type, jl_value_t **args, uint32_t na)
{
    jl_ptls_t ptls = jl_get_ptls_states();
    // singleton types have a unique, pre-allocated instance
    if (type->instance != NULL)
        return type->instance;
    size_t nf = jl_datatype_nfields(type);
    // FIX: reject more arguments than declared fields -- previously this
    // wrote past the end of the allocated object via jl_set_nth_field
    if (na > nf)
        jl_error("invalid struct allocation: too many arguments");
    jl_value_t *jv = jl_gc_alloc(ptls, jl_datatype_size(type), type);
    JL_GC_PUSH1(&jv); // root across jl_isa / jl_set_nth_field, which may allocate
    for (size_t i = 0; i < na; i++) {
        jl_value_t *ft = jl_field_type(type, i);
        if (!jl_isa(args[i], ft))
            jl_type_error("new", ft, args[i]);
        jl_set_nth_field(jv, i, args[i]);
    }
    // initialize trailing fields so the object is GC-safe and well-formed
    for (size_t i = na; i < nf; i++) {
        if (jl_field_isptr(type, i)) {
            // pointer field: NULL marks it #undef
            *(jl_value_t**)((char*)jl_data_ptr(jv) + jl_field_offset(type, i)) = NULL;
        }
        else {
            jl_value_t *ft = jl_field_type(type, i);
            if (jl_is_uniontype(ft)) {
                // inline union: the selector byte lives in the last byte of the slot
                uint8_t *psel = &((uint8_t*)jv)[jl_field_offset(type, i) + jl_field_size(type, i) - 1];
                *psel = 0;
            }
        }
    }
    JL_GC_POP();
    return jv;
}
// Quick test whether argument types `types[0..n)` match the "simple"
// signature `sig` of length `lensig` (no free typevars except in the
// special-cased Type{T} positions). `va` indicates a trailing Vararg.
// Returns 1 on match, 0 otherwise.
static int sig_match_by_type_simple(jl_value_t **types, size_t n, jl_tupletype_t *sig, size_t lensig, int va)
{
    size_t i;
    if (va) lensig -= 1; // the Vararg slot is handled separately below
    for (i = 0; i < lensig; i++) {
        jl_value_t *decl = jl_field_type(sig, i);
        jl_value_t *a = types[i];
        if (jl_is_type_type(decl)) {
            jl_value_t *tp0 = jl_tparam0(decl);
            if (jl_is_type_type(a)) {
                if (tp0 == (jl_value_t*)jl_typetype_tvar) {
                    // in the case of Type{T}, the types don't have
                    // to match exactly either. this is cached as Type{T}.
                    // analogous to the situation with tuples.
                }
                else if (jl_is_typevar(tp0)) {
                    // bounded typevar: argument's parameter must be within the bound
                    if (!jl_subtype(jl_tparam0(a), ((jl_tvar_t*)tp0)->ub, 0))
                        return 0;
                }
                else {
                    // concrete Type{T}: parameters must be equal types
                    if (!jl_types_equal(jl_tparam0(a), tp0))
                        return 0;
                }
            }
            else if (!is_kind(a) || !jl_is_typevar(tp0) ||
                     ((jl_tvar_t*)tp0)->ub != (jl_value_t*)jl_any_type) {
                // manually unroll jl_subtype(a, decl)
                // where `a` can be a subtype like TypeConstructor
                // and decl is Type{T}
                return 0;
            }
        }
        else if (decl == (jl_value_t*)jl_any_type) {
            // Any matches everything; nothing to check
        }
        else {
            if (jl_is_type_type(a)) // decl is not Type, because it would be caught above
                a = jl_typeof(jl_tparam0(a));
            if (!jl_types_equal(a, decl))
                return 0;
        }
    }
    if (va) {
        // check the trailing Vararg slot against the remaining arguments
        jl_value_t *decl = jl_field_type(sig, i);
        if (jl_vararg_kind(decl) == JL_VARARG_INT) {
            // Vararg{T,N}: remaining argument count must equal N exactly
            if (n-i != jl_unbox_long(jl_tparam1(decl)))
                return 0;
        }
        jl_value_t *t = jl_tparam0(decl);
        for(; i < n; i++) {
            if (!jl_subtype(types[i], t, 0))
                return 0;
        }
        return 1;
    }
    return 1;
}
// Quick test whether argument types `types[0..n)` match signature `sig`
// (newer variant: recognizes `Type{T} where T` via one level of UnionAll
// unwrapping and uses the two-argument jl_subtype). Returns 1 on match.
static int sig_match_by_type_simple(jl_value_t **types, size_t n, jl_tupletype_t *sig, size_t lensig, int va)
{
    size_t i;
    if (va) lensig -= 1; // the Vararg slot is handled separately below
    for (i = 0; i < lensig; i++) {
        jl_value_t *decl = jl_field_type(sig, i);
        jl_value_t *a = types[i];
        // peel one UnionAll wrapper so `Type{T} where T` is recognized below
        jl_value_t *unw = jl_is_unionall(decl) ? ((jl_unionall_t*)decl)->body : decl;
        if (jl_is_type_type(unw)) {
            jl_value_t *tp0 = jl_tparam0(unw);
            if (jl_is_type_type(a)) {
                if (jl_is_typevar(tp0)) {
                    // in the case of Type{_}, the types don't have to match exactly.
                    // this is cached as `Type{T} where T`.
                    if (((jl_tvar_t*)tp0)->ub != (jl_value_t*)jl_any_type &&
                        !jl_subtype(jl_tparam0(a), ((jl_tvar_t*)tp0)->ub))
                        return 0;
                }
                else {
                    // concrete Type{T}: cheap typeof check first, then full equality
                    if (!(jl_typeof(jl_tparam0(a)) == jl_typeof(tp0) &&
                          jl_types_equal(jl_tparam0(a), tp0)))
                        return 0;
                }
            }
            else if (!jl_is_kind(a) || !jl_is_typevar(tp0) ||
                     ((jl_tvar_t*)tp0)->ub != (jl_value_t*)jl_any_type) {
                // manually unroll jl_subtype(a, decl)
                // where `a` can be a subtype and decl is Type{T}
                return 0;
            }
        }
        else if (decl == (jl_value_t*)jl_any_type) {
            // Any matches everything; nothing to check
        }
        else {
            if (jl_is_type_type(a)) // decl is not Type, because it would be caught above
                a = jl_typeof(jl_tparam0(a));
            if (!jl_types_equal(a, decl))
                return 0;
        }
    }
    if (va) {
        // check the trailing Vararg slot against the remaining arguments
        jl_value_t *decl = jl_unwrap_unionall(jl_field_type(sig, i));
        if (jl_vararg_kind(decl) == JL_VARARG_INT) {
            // Vararg{T,N}: remaining argument count must equal N exactly
            if (n-i != jl_unbox_long(jl_tparam1(decl)))
                return 0;
        }
        jl_value_t *t = jl_tparam0(decl);
        if (jl_is_typevar(t))
            t = ((jl_tvar_t*)t)->ub; // compare against the typevar's upper bound
        for(; i < n; i++) {
            if (!jl_subtype(types[i], t))
                return 0;
        }
        return 1;
    }
    return 1;
}
// count the homogeneous floating agregate size (saturating at max count of 8) unsigned isHFA(jl_datatype_t *ty, jl_datatype_t **ty0, bool *hva) const { size_t i, l = ty->layout->nfields; // handle homogeneous float aggregates if (l == 0) { if (ty != jl_float64_type && ty != jl_float32_type) return 9; *hva = false; if (*ty0 == NULL) *ty0 = ty; else if (*hva || ty->size != (*ty0)->size) return 9; return 1; } // handle homogeneous vector aggregates jl_datatype_t *fld0 = (jl_datatype_t*)jl_field_type(ty, 0); if (!jl_is_datatype(fld0) || ty->name == jl_vecelement_typename) return 9; if (fld0->name == jl_vecelement_typename) { if (!jl_is_primitivetype(jl_tparam0(fld0)) || jl_datatype_size(ty) > 16) return 9; if (l != 1 && l != 2 && l != 4 && l != 8 && l != 16) return 9; *hva = true; if (*ty0 == NULL) *ty0 = ty; else if (!*hva || ty->size != (*ty0)->size) return 9; for (i = 1; i < l; i++) { jl_datatype_t *fld = (jl_datatype_t*)jl_field_type(ty, i); if (fld != fld0) return 9; } return 1; } // recurse through other struct types int n = 0; for (i = 0; i < l; i++) { jl_datatype_t *fld = (jl_datatype_t*)jl_field_type(ty, i); if (!jl_is_datatype(fld) || ((jl_datatype_t*)fld)->layout == NULL) return 9; n += isHFA((jl_datatype_t*)fld, ty0, hva); if (n > 8) return 9; } return n; }
// See comment above for an explanation of NOINLINE. static int NOINLINE compare_fields(jl_value_t *a, jl_value_t *b, jl_datatype_t *dt) { size_t nf = jl_datatype_nfields(dt); for (size_t f=0; f < nf; f++) { size_t offs = jl_field_offset(dt, f); char *ao = (char*)jl_data_ptr(a) + offs; char *bo = (char*)jl_data_ptr(b) + offs; int eq; if (jl_field_isptr(dt, f)) { jl_value_t *af = *(jl_value_t**)ao; jl_value_t *bf = *(jl_value_t**)bo; if (af == bf) eq = 1; else if (af==NULL || bf==NULL) eq = 0; else eq = jl_egal(af, bf); } else { jl_datatype_t *ft = (jl_datatype_t*)jl_field_type(dt, f); if (!ft->layout->haspadding) { eq = bits_equal(ao, bo, jl_field_size(dt, f)); } else { assert(jl_datatype_nfields(ft) > 0); eq = compare_fields((jl_value_t*)ao, (jl_value_t*)bo, ft); } } if (!eq) return 0; } return 1; }
// Look up the cache entry keyed by leaf type `ty` in the ordered dict `a`.
// The index table maps (uid mod table size) -> 1-based position in `values`.
// Returns a typemap union holding jl_nothing when no matching entry exists.
static inline union jl_typemap_t mtcache_hash_lookup(const struct jl_ordereddict_t *a, jl_value_t *ty, int8_t tparam, int8_t offs)
{
    union jl_typemap_t miss;
    miss.unknown = jl_nothing;
    uintptr_t uid = ((jl_datatype_t*)ty)->uid;
    if (!uid)
        return miss; // only types with an assigned uid are cached here
    size_t idx = jl_intref(a->indexes, uid & (a->indexes->nrows-1));
    if (idx == 0)
        return miss; // empty index slot
    union jl_typemap_t entry;
    entry.unknown = jl_array_ptr_ref(a->values, idx - 1);
    if (entry.unknown == jl_nothing)
        return miss;
    // recover the key this entry was stored under
    jl_value_t *key;
    if (jl_typeof(entry.unknown) == (jl_value_t*)jl_typemap_level_type) {
        key = entry.node->key;
    }
    else {
        assert(jl_typeof(entry.unknown) == (jl_value_t*)jl_typemap_entry_type);
        key = jl_field_type(entry.leaf->sig, offs);
        if (tparam)
            key = jl_tparam0(key);
    }
    return (key == ty) ? entry : miss;
}
// Rebuild the ordered-dict index table of `pa` with `newlen` slots,
// re-inserting every live entry. On a hash collision the target size is
// doubled and the whole pass restarts (`i` is reset to 0). `parent` owns
// the arrays, so a GC write barrier is issued for the new index array.
static void mtcache_rehash(struct jl_ordereddict_t *pa, size_t newlen, jl_value_t *parent, int8_t tparam, int8_t offs)
{
    size_t i, nval = jl_array_len(pa->values);
    jl_array_t *n = jl_alloc_int_1d(nval + 1, newlen);
    for (i = 1; i <= nval; i++) { // indexes are 1-based: 0 marks an empty slot
        union jl_typemap_t ml;
        ml.unknown = jl_array_ptr_ref(pa->values, i - 1);
        if (ml.unknown == jl_nothing)
            continue; // placeholder/deleted value slot
        // recover the datatype key this entry is hashed under
        jl_value_t *t;
        if (jl_typeof(ml.unknown) == (jl_value_t*)jl_typemap_level_type) {
            t = ml.node->key;
        }
        else {
            assert(jl_typeof(ml.unknown) == (jl_value_t*)jl_typemap_entry_type);
            t = jl_field_type(ml.leaf->sig, offs);
            if (tparam)
                t = jl_tparam0(t);
        }
        uintptr_t uid = ((jl_datatype_t*)t)->uid;
        size_t newi = uid & (newlen - 1); // newlen is a power of two
        if (jl_intref(n, newi) == 0) {
            jl_intset(n, newi, i);
        }
        else {
            // hash collision: start over after doubling the size again
            i = 0;
            newlen *= 2;
            n = jl_alloc_int_1d(nval + 1, newlen);
        }
    }
    pa->indexes = n;
    jl_gc_wb(parent, n);
}
// Walk the entries of the ordered-dict cache `a`, recursing into each entry
// whose key can intersect `ty`. Keys are leaf types, so the intersection
// test reduces to subtype/identity checks. Returns 0 to abort the walk.
static int jl_typemap_intersection_array_visitor(struct jl_ordereddict_t *a, jl_value_t *ty, int tparam, int offs,
                                                 struct typemap_intersection_env *closure)
{
    size_t i, l = jl_array_len(a->values);
    union jl_typemap_t *data = (union jl_typemap_t*)jl_array_data(a->values);
    for (i = 0; i < l; i++) {
        union jl_typemap_t ml = data[i];
        if (ml.unknown == jl_nothing)
            continue; // empty/placeholder slot
        // recover the key this entry is stored under
        jl_value_t *t;
        if (jl_typeof(ml.unknown) == (jl_value_t*)jl_typemap_level_type) {
            t = ml.node->key;
        }
        else {
            t = jl_field_type(ml.leaf->sig, offs);
            if (tparam)
                t = jl_tparam0(t);
        }
        if (ty == (jl_value_t*)jl_any_type || // easy case: Any always matches
            (tparam ?  // need to compute `ty <: Type{t}`
             (jl_is_uniontype(ty) || // punt on Union{...} right now
              jl_typeof(t) == ty || // deal with kinds (e.g. ty == DataType && t == Type{t})
              (jl_is_type_type(ty) && (jl_is_typevar(jl_tparam0(ty)) ?
                                       jl_subtype(t, ((jl_tvar_t*)jl_tparam0(ty))->ub, 0) : // deal with ty == Type{<:T}
                                       jl_subtype(t, jl_tparam0(ty), 0)))) // deal with ty == Type{T{#<:T}}
                    : jl_subtype(t, ty, 0))) // `t` is a leaftype, so intersection test becomes subtype
            if (!jl_typemap_intersection_visitor(ml, offs+1, closure))
                return 0;
    }
    return 1;
}
// Walk the entries of the ordered-dict cache `a` (newer variant: unwraps
// UnionAll signatures and uses the two-argument jl_subtype / jl_isa).
// Recurses into each entry whose leaf-type key can intersect `ty`;
// returns 0 to abort the walk early.
static int jl_typemap_intersection_array_visitor(struct jl_ordereddict_t *a, jl_value_t *ty, int tparam, int offs,
                                                 struct typemap_intersection_env *closure)
{
    size_t i, l = jl_array_len(a->values);
    union jl_typemap_t *data = (union jl_typemap_t*)jl_array_data(a->values);
    for (i = 0; i < l; i++) {
        union jl_typemap_t ml = data[i];
        if (ml.unknown == jl_nothing)
            continue; // empty/placeholder slot
        // recover the key this entry is stored under
        jl_value_t *t;
        if (jl_typeof(ml.unknown) == (jl_value_t*)jl_typemap_level_type) {
            t = ml.node->key;
        }
        else {
            t = jl_field_type(jl_unwrap_unionall((jl_value_t*)ml.leaf->sig), offs);
            if (tparam)
                t = jl_tparam0(t);
        }
        // `t` is a leaftype, so intersection test becomes subtype
        if (ty == (jl_value_t*)jl_any_type || // easy case: Any always matches
            (tparam ?
             (jl_typeof(t) == ty || jl_isa(t, ty)) // (Type{t} <: ty), where is_leaf_type(t) => isa(t, ty)
                    : (t == ty || jl_subtype(t, ty)))) {
            if (!jl_typemap_intersection_visitor(ml, offs + 1, closure))
                return 0;
        }
    }
    return 1;
}
// Compute offset, size, and isptr for each field of `st`, plus the struct's
// total size, alignment, and pointer-free flag. Isbits leaf fields are
// stored inline with their own size/alignment; all other fields are stored
// as boxed pointers. (Early variant: no overflow or padding tracking.)
void jl_compute_field_offsets(jl_datatype_t *st)
{
    size_t sz = 0, alignm = 1;
    int ptrfree = 1;
    for(size_t i=0; i < jl_datatype_nfields(st); i++) {
        jl_value_t *ty = jl_field_type(st, i);
        size_t fsz, al;
        if (jl_isbits(ty) && jl_is_leaf_type(ty)) {
            // inline storage: use the field type's own size and alignment
            fsz = jl_datatype_size(ty);
            al = ((jl_datatype_t*)ty)->alignment;
            st->fields[i].isptr = 0;
        }
        else {
            // boxed storage: a pointer, aligned at most to MAX_ALIGN
            fsz = sizeof(void*);
            if (fsz > MAX_ALIGN)
                fsz = MAX_ALIGN;
            al = fsz;
            st->fields[i].isptr = 1;
            ptrfree = 0;
        }
        if (al != 0) {
            sz = LLT_ALIGN(sz, al); // pad up to this field's alignment
            if (al > alignm)
                alignm = al;
        }
        st->fields[i].offset = sz;
        st->fields[i].size = fsz;
        sz += fsz;
    }
    st->alignment = alignm;
    st->size = LLT_ALIGN(sz, alignm); // round total size up to struct alignment
    st->pointerfree = ptrfree && !st->abstract;
}
// Determine if homogeneous tuple with fields of type t will have // a special alignment beyond normal Julia rules. // Return special alignment if one exists, 0 if normal alignment rules hold. // A non-zero result *must* match the LLVM rules for a vector type <nfields x t>. // For sake of Ahead-Of-Time (AOT) compilation, this routine has to work // without LLVM being available. unsigned jl_special_vector_alignment(size_t nfields, jl_value_t *t) { if (!jl_is_vecelement_type(t)) return 0; // LLVM 3.7 and 3.8 either crash or generate wrong code for many // SIMD vector sizes N. It seems the rule is that N can have at // most 2 non-zero bits. (This is true at least for N<=100.) See // also <https://llvm.org/bugs/show_bug.cgi?id=27708>. size_t mask = nfields; // See e.g. // <https://graphics.stanford.edu/%7Eseander/bithacks.html> for an // explanation of this bit-counting algorithm. mask &= mask-1; // clear least-significant 1 if present mask &= mask-1; // clear another 1 if (mask) return 0; // nfields has more than two 1s assert(jl_datatype_nfields(t)==1); jl_value_t *ty = jl_field_type(t, 0); if (!jl_is_bitstype(ty)) // LLVM requires that a vector element be a primitive type. // LLVM allows pointer types as vector elements, but until a // motivating use case comes up for Julia, we reject pointers. return 0; size_t elsz = jl_datatype_size(ty); if (elsz>8 || (1<<elsz & 0x116) == 0) // Element size is not 1, 2, 4, or 8. return 0; size_t size = nfields*elsz; // LLVM's alignment rule for vectors seems to be to round up to // a power of two, even if that's overkill for the target hardware. size_t alignment=1; for( ; size>alignment; alignment*=2 ) continue; return alignment; }
// Determine if homogeneous tuple with fields of type t will have // a special alignment beyond normal Julia rules. // Return special alignment if one exists, 0 if normal alignment rules hold. // A non-zero result *must* match the LLVM rules for a vector type <nfields x t>. // For sake of Ahead-Of-Time (AOT) compilation, this routine has to work // without LLVM being available. unsigned jl_special_vector_alignment(size_t nfields, jl_value_t *t) { if (!is_vecelement_type(t)) return 0; if (nfields>16 || (1<<nfields & 0x1157C) == 0) // Number of fields is not 2, 3, 4, 5, 6, 8, 10, 12, or 16. return 0; assert(jl_datatype_nfields(t)==1); jl_value_t *ty = jl_field_type(t, 0); if( !jl_is_bitstype(ty) ) // LLVM requires that a vector element be a primitive type. // LLVM allows pointer types as vector elements, but until a // motivating use case comes up for Julia, we reject pointers. return 0; size_t elsz = jl_datatype_size(ty); if (elsz>8 || (1<<elsz & 0x116) == 0) // Element size is not 1, 2, 4, or 8. return 0; size_t size = nfields*elsz; // LLVM's alignment rule for vectors seems to be to round up to // a power of two, even if that's overkill for the target hardware. size_t alignment=1; for( ; size>alignment; alignment*=2 ) continue; return alignment; }
// Grow the flat typemap cache array *pa (open addressing with exactly one
// entry per slot, no probing) and re-insert every live entry. On any
// collision the target size is doubled and the pass restarts (`i` reset
// to 0). Issues a GC write barrier on `parent` for the new array.
static void mtcache_rehash(jl_array_t **pa, jl_value_t *parent, int8_t tparam, int8_t offs)
{
    size_t i, len = jl_array_len(*pa);
    size_t newlen = next_power_of_two(len) * 2;
    jl_value_t **d = (jl_value_t**)jl_array_data(*pa);
    jl_array_t *n = jl_alloc_vec_any(newlen);
    for (i = 1; i <= len; i++) {
        union jl_typemap_t ml;
        ml.unknown = d[i - 1];
        if (ml.unknown != NULL && ml.unknown != jl_nothing) {
            // recover the datatype key this entry is hashed under
            jl_value_t *t;
            if (jl_typeof(ml.unknown) == (jl_value_t*)jl_typemap_level_type) {
                t = ml.node->key;
            }
            else {
                t = jl_field_type(ml.leaf->sig, offs);
                if (tparam)
                    t = jl_tparam0(t);
            }
            uintptr_t uid = ((jl_datatype_t*)t)->uid;
            size_t idx = uid & (newlen - 1); // newlen is a power of two
            if (((jl_value_t**)n->data)[idx] == NULL) {
                ((jl_value_t**)n->data)[idx] = ml.unknown;
            }
            else {
                // hash collision: start over after doubling the size again
                i = 0;
                newlen *= 2;
                n = jl_alloc_vec_any(newlen);
            }
        }
    }
    *pa = n;
    jl_gc_wb(parent, n);
}
// Look up the cache entry keyed by leaf type `ty` in the flat hash array
// `a` (one entry per slot, indexed by uid mod array length). Returns a
// typemap union holding jl_nothing when no matching entry exists.
static inline union jl_typemap_t mtcache_hash_lookup(jl_array_t *a, jl_value_t *ty, int8_t tparam, int8_t offs)
{
    union jl_typemap_t miss;
    miss.unknown = jl_nothing;
    uintptr_t uid = ((jl_datatype_t*)ty)->uid;
    if (!uid)
        return miss; // only types with an assigned uid are cached here
    jl_value_t *slot = jl_array_ptr_ref(a, uid & (a->nrows-1));
    if (slot == NULL || slot == jl_nothing)
        return miss; // empty slot
    union jl_typemap_t entry;
    entry.unknown = slot;
    // recover the key this entry was stored under
    jl_value_t *key;
    if (jl_typeof(slot) == (jl_value_t*)jl_typemap_level_type) {
        key = entry.node->key;
    }
    else {
        key = jl_field_type(entry.leaf->sig, offs);
        if (tparam)
            key = jl_tparam0(key);
    }
    return (key == ty) ? entry : miss;
}
// Compute the hash of object `v`, whose type is `tv`. Immutable objects
// hash structurally (seeded by the type's hash, then mixing each field);
// mutable objects hash by identity (address). Symbols, simple vectors,
// DataTypes, TypeNames, and Strings have dedicated fast paths.
static uintptr_t jl_object_id_(jl_value_t *tv, jl_value_t *v)
{
    if (tv == (jl_value_t*)jl_sym_type)
        return ((jl_sym_t*)v)->hash;
    if (tv == (jl_value_t*)jl_simplevector_type)
        return hash_svec((jl_svec_t*)v);
    jl_datatype_t *dt = (jl_datatype_t*)tv;
    if (dt == jl_datatype_type) {
        jl_datatype_t *dtv = (jl_datatype_t*)v;
        // `name->wrapper` is cacheable even though it contains TypeVars
        // that don't have stable IDs.
        //if (jl_egal(dtv->name->wrapper, v))
        //    return bitmix(~dtv->name->hash, 0xaa5566aa);
        return bitmix(~dtv->name->hash, hash_svec(dtv->parameters));
    }
    if (dt == jl_typename_type)
        return ((jl_typename_t*)v)->hash;
#ifdef _P64
    if (v == jl_ANY_flag)
        return 0x31c472f68ee30bddULL;
#else
    if (v == jl_ANY_flag)
        return 0x8ee30bdd;
#endif
    if (dt == jl_string_type) {
#ifdef _P64
        return memhash_seed(jl_string_data(v), jl_string_len(v), 0xedc3b677);
#else
        return memhash32_seed(jl_string_data(v), jl_string_len(v), 0xedc3b677);
#endif
    }
    if (dt->mutabl)
        return inthash((uintptr_t)v); // mutable: identity hash
    size_t sz = jl_datatype_size(tv);
    uintptr_t h = jl_object_id(tv); // seed with the type's own hash
    if (sz == 0)
        return ~h;
    size_t nf = jl_datatype_nfields(dt);
    if (nf == 0) {
        // primitive type: hash the raw bits
        return bits_hash(jl_data_ptr(v), sz) ^ h;
    }
    for (size_t f = 0; f < nf; f++) {
        size_t offs = jl_field_offset(dt, f);
        char *vo = (char*)jl_data_ptr(v) + offs;
        uintptr_t u;
        if (jl_field_isptr(dt, f)) {
            // FIX: renamed from `f`, which shadowed the loop index `f`
            jl_value_t *fval = *(jl_value_t**)vo;
            u = fval == NULL ? 0 : jl_object_id(fval); // NULL (#undef) hashes as 0
        }
        else {
            jl_datatype_t *fieldtype = (jl_datatype_t*)jl_field_type(dt, f);
            assert(jl_is_datatype(fieldtype) && !fieldtype->abstract && !fieldtype->mutabl);
            if (fieldtype->layout->haspadding)
                // padding bytes are undefined, so hash field-by-field
                u = jl_object_id_((jl_value_t*)fieldtype, (jl_value_t*)vo);
            else
                u = bits_hash(vo, jl_field_size(dt, f));
        }
        h = bitmix(h, u);
    }
    return h;
}
// Fetch field `i` of `v` without bounds checking (asserted in debug builds).
// Pointer fields return the stored reference; inline bits fields return a
// freshly boxed copy of the bytes.
JL_DLLEXPORT jl_value_t *jl_get_nth_field(jl_value_t *v, size_t i)
{
    jl_datatype_t *st = (jl_datatype_t*)jl_typeof(v);
    assert(i < jl_datatype_nfields(st));
    char *addr = (char*)v + jl_field_offset(st,i);
    if (jl_field_isptr(st,i))
        return *(jl_value_t**)addr;
    return jl_new_bits(jl_field_type(st,i), addr);
}
// Return 1 iff `v` is a tuple whose field types are all Int (jl_long_type),
// i.e. an NTuple{N,Int}; 0 otherwise.
static inline int is_ntuple_long(jl_value_t *v)
{
    if (!jl_is_tuple(v))
        return 0;
    size_t count = jl_nfields(v);
    size_t idx = 0;
    while (idx < count) {
        if (jl_field_type(jl_typeof(v), idx) != (jl_value_t*)jl_long_type)
            return 0;
        idx++;
    }
    return 1;
}
// Compute field offsets/sizes for `st` with overflow checking: the width of
// the per-field descriptors (8/16/32-bit, selected by fielddesc_type) bounds
// the maximum representable offset and size. Also tracks haspadding when
// alignment gaps, nested padding, or tail padding appear.
void jl_compute_field_offsets(jl_datatype_t *st)
{
    size_t sz = 0, alignm = 1;
    int ptrfree = 1;
    assert(0 <= st->fielddesc_type && st->fielddesc_type <= 2);
    // fielddesc_type 0/1/2 selects 8/16/32-bit offset fields
    uint64_t max_offset = (((uint64_t)1) << (1 << (3 + st->fielddesc_type))) - 1;
    uint64_t max_size = max_offset >> 1;
    for(size_t i=0; i < jl_datatype_nfields(st); i++) {
        jl_value_t *ty = jl_field_type(st, i);
        size_t fsz, al;
        if (jl_isbits(ty) && jl_is_leaf_type(ty)) {
            fsz = jl_datatype_size(ty);
            // Should never happen
            if (__unlikely(fsz > max_size))
                jl_throw(jl_overflow_exception);
            al = ((jl_datatype_t*)ty)->alignment;
            jl_field_setisptr(st, i, 0);
            if (((jl_datatype_t*)ty)->haspadding)
                st->haspadding = 1; // padding propagates from nested fields
        }
        else {
            // boxed field: stored as a pointer, aligned at most to MAX_ALIGN
            fsz = sizeof(void*);
            if (fsz > MAX_ALIGN)
                fsz = MAX_ALIGN;
            al = fsz;
            jl_field_setisptr(st, i, 1);
            ptrfree = 0;
        }
        if (al != 0) {
            size_t alsz = LLT_ALIGN(sz, al);
            if (sz & (al - 1))
                st->haspadding = 1; // alignment gap inserted before this field
            sz = alsz;
            if (al > alignm)
                alignm = al;
        }
        jl_field_setoffset(st, i, sz);
        jl_field_setsize(st, i, fsz);
        if (__unlikely(max_offset - sz < fsz))
            jl_throw(jl_overflow_exception);
        sz += fsz;
    }
    st->alignment = alignm;
    st->size = LLT_ALIGN(sz, alignm);
    if (st->size > sz)
        st->haspadding = 1; // tail padding
    st->pointerfree = ptrfree && !st->abstract;
}
// Leaf-signature fast path: every argument type must be exactly equal to
// the declared type (no subtyping). Type-valued arguments are compared by
// the kind of their parameter, since a leaf signature cannot declare Type.
static int sig_match_by_type_leaf(jl_value_t **types, jl_tupletype_t *sig, size_t n)
{
    for (size_t idx = 0; idx < n; idx++) {
        jl_value_t *given = types[idx];
        if (jl_is_type_type(given))
            // decl is not Type, because it wouldn't be leafsig
            given = jl_typeof(jl_tparam0(given));
        if (!jl_types_equal(given, jl_field_type(sig, idx)))
            return 0;
    }
    return 1;
}
// Bounds- and undef-checked field access: throws a BoundsError for an
// out-of-range index and an UndefRefError for an unassigned pointer field.
JL_DLLEXPORT jl_value_t *jl_get_nth_field_checked(jl_value_t *v, size_t i)
{
    jl_datatype_t *st = (jl_datatype_t*)jl_typeof(v);
    if (i >= jl_datatype_nfields(st))
        jl_bounds_error_int(v, i+1); // report a 1-based index
    char *addr = (char*)v + jl_field_offset(st,i);
    if (!jl_field_isptr(st,i))
        return jl_new_bits(jl_field_type(st,i), addr);
    jl_value_t *fval = *(jl_value_t**)addr;
    if (fval == NULL)
        jl_throw(jl_undefref_exception); // field is #undef
    return fval;
}
// Fetch field `i` of `v` (unchecked: asserts i in bounds). Handles pointer
// fields, plain inline bits fields, and inline isbits-union fields, whose
// active component is recorded in a trailing selector byte.
JL_DLLEXPORT jl_value_t *jl_get_nth_field(jl_value_t *v, size_t i)
{
    jl_datatype_t *st = (jl_datatype_t*)jl_typeof(v);
    assert(i < jl_datatype_nfields(st));
    size_t offs = jl_field_offset(st, i);
    if (jl_field_isptr(st, i)) {
        return *(jl_value_t**)((char*)v + offs);
    }
    jl_value_t *ty = jl_field_type(st, i);
    if (jl_is_uniontype(ty)) {
        // the selector byte is stored in the last byte of the field slot
        uint8_t sel = ((uint8_t*)v)[offs + jl_field_size(st, i) - 1];
        ty = jl_nth_union_component(ty, sel);
        if (jl_is_datatype_singleton((jl_datatype_t*)ty))
            return ((jl_datatype_t*)ty)->instance; // singleton: no bytes stored
    }
    return jl_new_bits(ty, (char*)v + offs);
}
// Return a pointer to the ordered-dict cache slot for leaf type `ty`,
// inserting a fresh jl_nothing placeholder if absent, or NULL when `ty`
// must not be cached here (no uid, a kind, or contains typevars).
// Grows/rehashes on collision with a different key and retries.
static union jl_typemap_t *mtcache_hash_bp(struct jl_ordereddict_t *pa, jl_value_t *ty,
                          int8_t tparam, int8_t offs, jl_value_t *parent)
{
    if (jl_is_datatype(ty)) {
        uintptr_t uid = ((jl_datatype_t*)ty)->uid;
        if (!uid || is_kind(ty) || jl_has_typevars(ty))
            // be careful not to put non-leaf types or DataType/TypeConstructor in the cache here,
            // since they should have a lower priority and need to go into the sorted list
            return NULL;
        if (pa->values == (void*)jl_nothing) {
            // lazily allocate the index table and value list
            pa->indexes = jl_alloc_int_1d(0, INIT_CACHE_SIZE);
            jl_gc_wb(parent, pa->indexes);
            pa->values = jl_alloc_vec_any(0);
            jl_gc_wb(parent, pa->values);
        }
        while (1) {
            size_t slot = uid & (pa->indexes->nrows - 1);
            size_t idx = jl_intref(pa->indexes, slot);
            if (idx == 0) {
                // empty slot: append a placeholder value and claim the slot
                jl_array_ptr_1d_push(pa->values, jl_nothing);
                idx = jl_array_len(pa->values);
                if (idx > jl_max_int(pa->indexes))
                    // widen the index array's integer element type
                    // NOTE(review): `slot` was computed from the old table; if the
                    // rehash also grows the table on collision, this jl_intset may
                    // target a stale slot -- verify against mtcache_rehash behavior
                    mtcache_rehash(pa, jl_array_len(pa->indexes), parent, tparam, offs);
                jl_intset(pa->indexes, slot, idx);
                return &((union jl_typemap_t*)jl_array_data(pa->values))[idx - 1];
            }
            union jl_typemap_t *pml = &((union jl_typemap_t*)jl_array_data(pa->values))[idx - 1];
            if (pml->unknown == jl_nothing)
                return pml; // reusable placeholder slot
            // recover the key stored in this slot
            jl_value_t *t;
            if (jl_typeof(pml->unknown) == (jl_value_t*)jl_typemap_level_type) {
                t = pml->node->key;
            }
            else {
                assert(jl_typeof(pml->unknown) == (jl_value_t*)jl_typemap_entry_type);
                t = jl_field_type(pml->leaf->sig, offs);
                if (tparam)
                    t = jl_tparam0(t);
            }
            if (t == ty)
                return pml;
            // collision with a different key: grow the table and retry
            mtcache_rehash(pa, jl_array_len(pa->indexes) * 2, parent, tparam, offs);
        }
    }
    return NULL;
}
// Compute field offsets/sizes for `st`, throwing an OverflowError when a
// field size or offset exceeds the fixed JL_FIELD_MAX_SIZE/OFFSET
// descriptor limits. Records haspadding when alignment gaps appear or a
// nested field type itself has padding.
void jl_compute_field_offsets(jl_datatype_t *st)
{
    size_t sz = 0, alignm = 1;
    int ptrfree = 1;
    for(size_t i=0; i < jl_datatype_nfields(st); i++) {
        jl_value_t *ty = jl_field_type(st, i);
        size_t fsz, al;
        if (jl_isbits(ty) && jl_is_leaf_type(ty)) {
            fsz = jl_datatype_size(ty);
            if (__unlikely(fsz > JL_FIELD_MAX_SIZE))
                jl_throw(jl_overflow_exception);
            al = ((jl_datatype_t*)ty)->alignment;
            st->fields[i].isptr = 0;
            if (((jl_datatype_t*)ty)->haspadding)
                st->haspadding = 1; // padding propagates from nested fields
        }
        else {
            // boxed field: stored as a pointer, aligned at most to MAX_ALIGN
            fsz = sizeof(void*);
            if (fsz > MAX_ALIGN)
                fsz = MAX_ALIGN;
            al = fsz;
            st->fields[i].isptr = 1;
            ptrfree = 0;
        }
        if (al != 0) {
            size_t alsz = LLT_ALIGN(sz, al);
            if (alsz > sz)
                st->haspadding = 1; // alignment gap inserted before this field
            sz = alsz;
            if (al > alignm)
                alignm = al;
        }
        if (__unlikely(sz > JL_FIELD_MAX_OFFSET))
            jl_throw(jl_overflow_exception);
        st->fields[i].offset = sz;
        st->fields[i].size = fsz;
        sz += fsz;
    }
    st->alignment = alignm;
    st->size = LLT_ALIGN(sz, alignm); // round total size up to struct alignment
    st->pointerfree = ptrfree && !st->abstract;
}
// Choose the LLVM type used to pass/return a Julia struct `dt` under this
// calling convention: homogeneous float/vector aggregates become
// [n x float/double] or [n x <k x elt>] arrays; other aggregates are
// rewritten to integer arrays sized and aligned for the struct.
// Returns NULL when the natural bitstype representation is fine as-is.
Type *preferred_llvm_type(jl_datatype_t *dt, bool isret) const override
{
    // Arguments are either scalar or passed by value
    size_t size = jl_datatype_size(dt);
    // don't need to change bitstypes
    if (!jl_datatype_nfields(dt))
        return NULL;
    // legalize this into [n x f32/f64]
    jl_datatype_t *ty0 = NULL;
    bool hva = false;
    int hfa = isHFA(dt, &ty0, &hva);
    if (hfa <= 8) { // isHFA returns 9 to mean "not an HFA/HVA"
        if (ty0 == jl_float32_type) {
            return ArrayType::get(T_float32, hfa);
        }
        else if (ty0 == jl_float64_type) {
            return ArrayType::get(T_float64, hfa);
        }
        else {
            // homogeneous vector aggregate: element is a VecElement tuple
            jl_datatype_t *vecty = (jl_datatype_t*)jl_field_type(ty0, 0);
            assert(jl_is_datatype(vecty) && vecty->name == jl_vecelement_typename);
            jl_value_t *elemty = jl_tparam0(vecty);
            assert(jl_is_primitivetype(elemty));
            Type *ety = julia_type_to_llvm(elemty);
            Type *vty = VectorType::get(ety, jl_datatype_nfields(ty0));
            return ArrayType::get(vty, hfa);
        }
    }
    // rewrite integer-sized (non-HFA) struct to an array
    // the bitsize of the integer gives the desired alignment
    if (size > 8) {
        if (jl_datatype_align(dt) <= 8) {
            return ArrayType::get(T_int64, (size + 7) / 8);
        }
        else {
            Type *T_int128 = Type::getIntNTy(jl_LLVMContext, 128);
            return ArrayType::get(T_int128, (size + 15) / 16);
        }
    }
    return Type::getIntNTy(jl_LLVMContext, size * 8);
}
// See comment above for an explanation of NOINLINE. static int NOINLINE compare_fields(jl_value_t *a, jl_value_t *b, jl_datatype_t *dt) { size_t f, nf = jl_datatype_nfields(dt); for (f = 0; f < nf; f++) { size_t offs = jl_field_offset(dt, f); char *ao = (char*)jl_data_ptr(a) + offs; char *bo = (char*)jl_data_ptr(b) + offs; if (jl_field_isptr(dt, f)) { jl_value_t *af = *(jl_value_t**)ao; jl_value_t *bf = *(jl_value_t**)bo; if (af != bf) { if (af == NULL || bf == NULL) return 0; if (!jl_egal(af, bf)) return 0; } } else { jl_datatype_t *ft = (jl_datatype_t*)jl_field_type(dt, f); if (jl_is_uniontype(ft)) { uint8_t asel = ((uint8_t*)ao)[jl_field_size(dt, f) - 1]; uint8_t bsel = ((uint8_t*)bo)[jl_field_size(dt, f) - 1]; if (asel != bsel) return 0; ft = (jl_datatype_t*)jl_nth_union_component((jl_value_t*)ft, asel); } if (!ft->layout->haspadding) { if (!bits_equal(ao, bo, ft->size)) return 0; } else { assert(jl_datatype_nfields(ft) > 0); if (!compare_fields((jl_value_t*)ao, (jl_value_t*)bo, ft)) return 0; } } } return 1; }
// Bounds- and undef-checked field access with inline-union support:
// throws BoundsError for a bad index, UndefRefError for an unassigned
// pointer field; union fields dispatch on their trailing selector byte.
JL_DLLEXPORT jl_value_t *jl_get_nth_field_checked(jl_value_t *v, size_t i)
{
    jl_datatype_t *st = (jl_datatype_t*)jl_typeof(v);
    if (i >= jl_datatype_nfields(st))
        jl_bounds_error_int(v, i + 1); // report a 1-based index
    size_t offs = jl_field_offset(st, i);
    char *addr = (char*)v + offs;
    if (jl_field_isptr(st, i)) {
        jl_value_t *boxed = *(jl_value_t**)addr;
        if (boxed == NULL)
            jl_throw(jl_undefref_exception); // field is #undef
        return boxed;
    }
    jl_value_t *ty = jl_field_type(st, i);
    if (!jl_is_uniontype(ty))
        return jl_new_bits(ty, addr);
    // inline union: the selector byte (stored last) picks the active component
    uint8_t sel = ((uint8_t*)v)[offs + jl_field_size(st, i) - 1];
    ty = jl_nth_union_component(ty, sel);
    if (jl_is_datatype_singleton((jl_datatype_t*)ty))
        return ((jl_datatype_t*)ty)->instance; // singleton: no bytes stored
    return jl_new_bits(ty, addr);
}
// Store `rhs` into field `i` of `v`. Pointer fields get a GC write barrier;
// inline union fields additionally record the active component index in the
// trailing selector byte. No bounds check: callers must validate `i`.
JL_DLLEXPORT void jl_set_nth_field(jl_value_t *v, size_t i, jl_value_t *rhs)
{
    jl_datatype_t *st = (jl_datatype_t*)jl_typeof(v);
    size_t offs = jl_field_offset(st, i);
    if (jl_field_isptr(st, i)) {
        *(jl_value_t**)((char*)v + offs) = rhs;
        if (rhs != NULL)
            jl_gc_wb(v, rhs); // old-gen v may now reference young rhs
    }
    else {
        jl_value_t *ty = jl_field_type(st, i);
        if (jl_is_uniontype(ty)) {
            // the selector byte lives in the last byte of the field slot
            uint8_t *psel = &((uint8_t*)v)[offs + jl_field_size(st, i) - 1];
            unsigned nth = 0;
            if (!jl_find_union_component(ty, jl_typeof(rhs), &nth))
                // NOTE(review): compiled out under NDEBUG, after which a
                // mismatched rhs silently stores selector 0 -- verify callers
                // always pass a value of a valid component type
                assert(0 && "invalid field assignment to isbits union");
            *psel = nth;
            if (jl_is_datatype_singleton((jl_datatype_t*)jl_typeof(rhs)))
                return; // singletons store no bytes; the selector suffices
        }
        jl_assign_bits((char*)v + offs, rhs);
    }
}
// Visit every occupied slot of the flat cache array `a`, recursing into the
// next typemap level for each entry. Returns 0 to abort the walk early.
static int jl_typemap_intersection_array_visitor(jl_array_t *a, jl_value_t *ty, int tparam, int offs,
                                                 struct typemap_intersection_env *closure)
{
    size_t len = jl_array_len(a);
    union jl_typemap_t *entries = (union jl_typemap_t*)jl_array_data(a);
    for (size_t idx = 0; idx < len; idx++) {
        union jl_typemap_t ml = entries[idx];
        if (ml.unknown == NULL || ml.unknown == jl_nothing)
            continue; // empty slot
        // recover the key this entry is stored under
        jl_value_t *t;
        if (jl_typeof(ml.unknown) == (jl_value_t*)jl_typemap_level_type) {
            t = ml.node->key;
        }
        else {
            t = jl_field_type(ml.leaf->sig, offs);
            if (tparam)
                t = jl_tparam0(t);
        }
        // TODO: fast path: test key `t` against `ty` before recursing
        (void)t;
        if (!jl_typemap_intersection_visitor(ml, offs+1, closure))
            return 0;
    }
    return 1;
}
// Flat-array variant of mtcache_hash_bp: return a pointer to the slot for
// leaf type `ty`, initializing it to jl_nothing if empty, or NULL when `ty`
// is not cacheable (no uid, a kind, or contains typevars). Collisions with
// a different key are resolved by growing (rehash) and retrying.
static union jl_typemap_t *mtcache_hash_bp(jl_array_t **pa, jl_value_t *ty,
                          int8_t tparam, int8_t offs, jl_value_t *parent)
{
    if (jl_is_datatype(ty)) {
        uintptr_t uid = ((jl_datatype_t*)ty)->uid;
        if (!uid || is_kind(ty) || jl_has_typevars(ty))
            // be careful not to put non-leaf types or DataType/TypeConstructor in the cache here,
            // since they should have a lower priority and need to go into the sorted list
            return NULL;
        if (*pa == (void*)jl_nothing) {
            // lazily allocate the cache array
            *pa = jl_alloc_vec_any(INIT_CACHE_SIZE);
            jl_gc_wb(parent, *pa);
        }
        while (1) {
            union jl_typemap_t *pml = &((union jl_typemap_t*)jl_array_data(*pa))[uid & ((*pa)->nrows-1)];
            union jl_typemap_t ml = *pml;
            if (ml.unknown == NULL || ml.unknown == jl_nothing) {
                pml->unknown = jl_nothing; // claim the empty slot
                return pml;
            }
            // recover the key stored in this slot
            jl_value_t *t;
            if (jl_typeof(ml.unknown) == (jl_value_t*)jl_typemap_level_type) {
                t = ml.node->key;
            }
            else {
                t = jl_field_type(ml.leaf->sig, offs);
                if (tparam)
                    t = jl_tparam0(t);
            }
            if (t == ty)
                return pml;
            // a different key hashed to the same slot: grow and retry
            mtcache_rehash(pa, parent, tparam, offs);
        }
    }
    return NULL;
}
// Full field-layout computation for `st`: per-field offsets, sizes, and
// isptr flags, total size/alignment, overflow checks against the fielddesc
// width, haspadding tracking, and a special case giving homogeneous tuples
// the stronger alignment LLVM uses when it lowers them to vectors.
void jl_compute_field_offsets(jl_datatype_t *st)
{
    size_t sz = 0, alignm = 1;
    int ptrfree = 1;
    int homogeneous = 1;
    jl_value_t *lastty = NULL;
    assert(0 <= st->fielddesc_type && st->fielddesc_type <= 2);
    // fielddesc_type 0/1/2 selects 8/16/32-bit descriptor fields, which
    // bounds the largest representable offset and size
    uint64_t max_offset = (((uint64_t)1) << (1 << (3 + st->fielddesc_type))) - 1;
    uint64_t max_size = max_offset >> 1;
    for(size_t i=0; i < jl_datatype_nfields(st); i++) {
        jl_value_t *ty = jl_field_type(st, i);
        size_t fsz, al;
        if (jl_isbits(ty) && jl_is_leaf_type(ty)) {
            fsz = jl_datatype_size(ty);
            // Should never happen
            if (__unlikely(fsz > max_size))
                jl_throw(jl_overflow_exception);
            al = ((jl_datatype_t*)ty)->alignment;
            jl_field_setisptr(st, i, 0);
            if (((jl_datatype_t*)ty)->haspadding)
                st->haspadding = 1; // padding propagates from nested fields
        }
        else {
            // boxed field: stored as a pointer, aligned at most to MAX_ALIGN
            fsz = sizeof(void*);
            if (fsz > MAX_ALIGN)
                fsz = MAX_ALIGN;
            al = fsz;
            jl_field_setisptr(st, i, 1);
            ptrfree = 0;
        }
        if (al != 0) {
            size_t alsz = LLT_ALIGN(sz, al);
            if (sz & (al - 1))
                st->haspadding = 1; // alignment gap inserted before this field
            sz = alsz;
            if (al > alignm)
                alignm = al;
        }
        homogeneous &= lastty==NULL || lastty==ty;
        lastty = ty;
        jl_field_setoffset(st, i, sz);
        jl_field_setsize(st, i, fsz);
        if (__unlikely(max_offset - sz < fsz))
            jl_throw(jl_overflow_exception);
        sz += fsz;
    }
    if (homogeneous && lastty!=NULL && jl_is_tuple_type(st)) {
        // Some tuples become LLVM vectors with stronger alignment than what was calculated above.
        unsigned al = jl_special_vector_alignment(jl_datatype_nfields(st), lastty);
        // NOTE(review): assumes the vector alignment is always a multiple of
        // the scalar alignment computed above -- confirm for all element sizes
        assert(al % alignm == 0);
        if (al)
            alignm = al;
    }
    st->alignment = alignm;
    st->size = LLT_ALIGN(sz, alignm);
    if (st->size > sz)
        st->haspadding = 1; // tail padding
    st->pointerfree = ptrfree && !st->abstract;
}