// Decide whether two method signatures denote the same type.
// Signatures containing type variables are compared with the generic
// equality routine (honoring `useenv`); ground signatures are compared
// by mutual subtyping.
int sigs_eq(jl_value_t *a, jl_value_t *b, int useenv)
{
    int has_vars = jl_has_typevars(a) || jl_has_typevars(b);
    if (has_vars)
        return jl_types_equal_generic(a, b, useenv);
    int a_le_b = jl_subtype(a, b, 0);
    return a_le_b && jl_subtype(b, a, 0);
}
// Visit every typemap entry in ordered-dict `a` whose key could intersect
// `ty`, recursing via jl_typemap_intersection_visitor at the next offset.
// `tparam` != 0 means the comparison is against Type{t} rather than t itself.
// Returns 0 as soon as a nested visitor aborts, 1 otherwise.
static int jl_typemap_intersection_array_visitor(struct jl_ordereddict_t *a, jl_value_t *ty, int tparam, int offs, struct typemap_intersection_env *closure)
{
    size_t i, l = jl_array_len(a->values);
    union jl_typemap_t *data = (union jl_typemap_t*)jl_array_data(a->values);
    for (i = 0; i < l; i++) {
        union jl_typemap_t ml = data[i];
        if (ml.unknown == jl_nothing)
            continue;   // empty slot in the dictionary
        jl_value_t *t;
        if (jl_typeof(ml.unknown) == (jl_value_t*)jl_typemap_level_type) {
            // interior node: its key is the split type at this level
            t = ml.node->key;
        }
        else {
            // leaf entry: pull the field type at this offset from its signature
            t = jl_field_type(ml.leaf->sig, offs);
            if (tparam)
                t = jl_tparam0(t);
        }
        if (ty == (jl_value_t*)jl_any_type || // easy case: Any always matches
            (tparam ?  // need to compute `ty <: Type{t}`
             (jl_is_uniontype(ty) || // punt on Union{...} right now
              jl_typeof(t) == ty || // deal with kinds (e.g. ty == DataType && t == Type{t})
              (jl_is_type_type(ty) && (jl_is_typevar(jl_tparam0(ty)) ?
                                       jl_subtype(t, ((jl_tvar_t*)jl_tparam0(ty))->ub, 0) : // deal with ty == Type{<:T}
                                       jl_subtype(t, jl_tparam0(ty), 0)))) // deal with ty == Type{T{#<:T}}
             : jl_subtype(t, ty, 0))) // `t` is a leaftype, so intersection test becomes subtype
            if (!jl_typemap_intersection_visitor(ml, offs+1, closure))
                return 0;
    }
    return 1;
}
// Reject declared supertypes that nothing is allowed to subtype:
// non-tag types, Symbol, Undef, Type, and Array. Raises an error naming
// the offending definition; returns normally when `super` is acceptable.
static void check_supertype(jl_value_t *super, char *name)
{
    int ok = jl_is_tag_type(super)
             && super != (jl_value_t*)jl_sym_type
             && super != (jl_value_t*)jl_undef_type
             && !jl_subtype(super, (jl_value_t*)jl_type_type, 0)
             && !jl_subtype(super, (jl_value_t*)jl_array_type, 0);
    if (!ok)
        jl_errorf("invalid subtyping in definition of %s", name);
}
// Compare two method signatures for equality.
// useenv == 0 : subtyping + ensure typevars correspond
// useenv == 1 : as 0, but also fail if bound != bound in some typevar match
// useenv == 2 : ignore typevars (UnionAll getting lost in intersection can
//               cause jl_types_equal to fail in the wrong direction for
//               some purposes)
int sigs_eq(jl_value_t *a, jl_value_t *b, int useenv)
{
    if (useenv != 2) {
        if (jl_has_typevars(a) || jl_has_typevars(b))
            return jl_types_equal_generic(a, b, useenv);
    }
    return jl_subtype(a, b, 0) && jl_subtype(b, a, 0);
}
// Fast structural matcher: compare a vector of `n` argument *types*
// against the fields of signature `sig` (field count `lensig`) without
// running the full subtyping algorithm. `va` indicates `sig` ends in a
// Vararg, which is matched element-wise at the bottom.
// Returns 1 on match, 0 otherwise.
static int sig_match_by_type_simple(jl_value_t **types, size_t n, jl_tupletype_t *sig, size_t lensig, int va)
{
    size_t i;
    if (va) lensig -= 1;    // handle the trailing Vararg separately below
    for (i = 0; i < lensig; i++) {
        jl_value_t *decl = jl_field_type(sig, i);
        jl_value_t *a = types[i];
        if (jl_is_type_type(decl)) {
            jl_value_t *tp0 = jl_tparam0(decl);
            if (jl_is_type_type(a)) {
                if (tp0 == (jl_value_t*)jl_typetype_tvar) {
                    // in the case of Type{T}, the types don't have
                    // to match exactly either. this is cached as Type{T}.
                    // analogous to the situation with tuples.
                }
                else if (jl_is_typevar(tp0)) {
                    // Type{T<:ub}: the argument's type parameter must fit the bound
                    if (!jl_subtype(jl_tparam0(a), ((jl_tvar_t*)tp0)->ub, 0))
                        return 0;
                }
                else {
                    // concrete Type{X}: parameters must be equal types
                    if (!jl_types_equal(jl_tparam0(a), tp0))
                        return 0;
                }
            }
            else if (!is_kind(a) || !jl_is_typevar(tp0) || ((jl_tvar_t*)tp0)->ub != (jl_value_t*)jl_any_type) {
                // manually unroll jl_subtype(a, decl)
                // where `a` can be a subtype like TypeConstructor
                // and decl is Type{T}
                return 0;
            }
        }
        else if (decl == (jl_value_t*)jl_any_type) {
            // Any matches everything; nothing to check
        }
        else {
            if (jl_is_type_type(a)) // decl is not Type, because it would be caught above
                a = jl_typeof(jl_tparam0(a));
            if (!jl_types_equal(a, decl))
                return 0;
        }
    }
    if (va) {
        jl_value_t *decl = jl_field_type(sig, i);
        if (jl_vararg_kind(decl) == JL_VARARG_INT) {
            // Vararg{T,N}: remaining argument count must equal N exactly
            if (n-i != jl_unbox_long(jl_tparam1(decl)))
                return 0;
        }
        jl_value_t *t = jl_tparam0(decl);
        // each trailing argument type must be a subtype of the Vararg element type
        for(; i < n; i++) {
            if (!jl_subtype(types[i], t, 0))
                return 0;
        }
        return 1;
    }
    return 1;
}
// Fast structural matcher (UnionAll-aware variant): compare `n` argument
// types against the fields of `sig` (field count `lensig`) without full
// subtyping. `va` indicates the signature ends in a Vararg.
// Returns 1 on match, 0 otherwise.
static int sig_match_by_type_simple(jl_value_t **types, size_t n, jl_tupletype_t *sig, size_t lensig, int va)
{
    size_t i;
    if (va) lensig -= 1;    // trailing Vararg is matched separately below
    for (i = 0; i < lensig; i++) {
        jl_value_t *decl = jl_field_type(sig, i);
        jl_value_t *a = types[i];
        // peel a single UnionAll wrapper so Type{T} where T is recognized
        jl_value_t *unw = jl_is_unionall(decl) ? ((jl_unionall_t*)decl)->body : decl;
        if (jl_is_type_type(unw)) {
            jl_value_t *tp0 = jl_tparam0(unw);
            if (jl_is_type_type(a)) {
                if (jl_is_typevar(tp0)) {
                    // in the case of Type{_}, the types don't have to match exactly.
                    // this is cached as `Type{T} where T`.
                    if (((jl_tvar_t*)tp0)->ub != (jl_value_t*)jl_any_type &&
                        !jl_subtype(jl_tparam0(a), ((jl_tvar_t*)tp0)->ub))
                        return 0;
                }
                else {
                    // concrete Type{X}: cheap kind check first, then type equality
                    if (!(jl_typeof(jl_tparam0(a)) == jl_typeof(tp0) && jl_types_equal(jl_tparam0(a), tp0)))
                        return 0;
                }
            }
            else if (!jl_is_kind(a) || !jl_is_typevar(tp0) || ((jl_tvar_t*)tp0)->ub != (jl_value_t*)jl_any_type) {
                // manually unroll jl_subtype(a, decl)
                // where `a` can be a subtype and decl is Type{T}
                return 0;
            }
        }
        else if (decl == (jl_value_t*)jl_any_type) {
            // Any matches everything; nothing to check
        }
        else {
            if (jl_is_type_type(a)) // decl is not Type, because it would be caught above
                a = jl_typeof(jl_tparam0(a));
            if (!jl_types_equal(a, decl))
                return 0;
        }
    }
    if (va) {
        jl_value_t *decl = jl_unwrap_unionall(jl_field_type(sig, i));
        if (jl_vararg_kind(decl) == JL_VARARG_INT) {
            // Vararg{T,N}: remaining argument count must equal N exactly
            if (n-i != jl_unbox_long(jl_tparam1(decl)))
                return 0;
        }
        jl_value_t *t = jl_tparam0(decl);
        if (jl_is_typevar(t))
            t = ((jl_tvar_t*)t)->ub;    // compare against the variable's upper bound
        for(; i < n; i++) {
            if (!jl_subtype(types[i], t))
                return 0;
        }
        return 1;
    }
    return 1;
}
// Fast matcher for argument *values*: check `n` argument values against
// the `lensig` entries of `sig` (with `va` marking a trailing Vararg).
// Returns 1 on match, 0 otherwise.
static inline int sig_match_simple(jl_value_t **args, size_t n, jl_value_t **sig, int va, size_t lensig)
{
    // NOTE: This function is a performance hot spot!!
    size_t i;
    if (va) lensig -= 1;    // trailing Vararg is matched separately below
    for (i = 0; i < lensig; i++) {
        jl_value_t *decl = sig[i];
        jl_value_t *a = args[i];
        if (decl == (jl_value_t*)jl_any_type) {
            // Any matches everything; nothing to check
        }
        else if ((jl_value_t*)jl_typeof(a) == decl) {
            /*
              we are only matching concrete types here, and those types are
              hash-consed, so pointer comparison should work.
            */
        }
        else if (jl_is_type_type(decl) && jl_is_type(a)) {
            jl_value_t *tp0 = jl_tparam0(decl);
            if (tp0 == (jl_value_t*)jl_typetype_tvar) {
                // in the case of Type{T}, the types don't have
                // to match exactly either. this is cached as Type{T}.
                // analogous to the situation with tuples.
            }
            else if (jl_is_typevar(tp0)) {
                // Type{T<:ub}: the type value must fit under the bound
                if (!jl_subtype(a, ((jl_tvar_t*)tp0)->ub, 0))
                    return 0;
            }
            else {
                // concrete Type{X}: pointer fast path, then type equality
                if (a!=tp0 && !jl_types_equal(a,tp0))
                    return 0;
            }
        }
        else {
            return 0;
        }
    }
    if (va) {
        jl_value_t *decl = sig[i];
        if (jl_vararg_kind(decl) == JL_VARARG_INT) {
            // Vararg{T,N}: remaining argument count must equal N exactly
            if (n-i != jl_unbox_long(jl_tparam1(decl)))
                return 0;
        }
        jl_value_t *t = jl_tparam0(decl);
        // each trailing argument value must be an instance of the element type
        for(; i < n; i++) {
            if (!jl_subtype(args[i], t, 1))
                return 0;
        }
        return 1;
    }
    return 1;
}
// Check whether the argument values `args` (count `n`) match cached
// signature `sig`; `va` marks a trailing vararg element in `sig`.
// Returns 1 on match, 0 otherwise.
//
// Fix: the length pre-checks now mirror cache_match_by_type. Previously
// (a) a non-vararg signature one longer than `n` was accepted with its
// last element silently ignored, and (b) `n > sig->length` with !va fell
// into the loop and indexed jl_tupleref(sig, i) out of bounds.
static inline int cache_match(jl_value_t **args, size_t n, jl_tuple_t *sig, int va)
{
    if (!va && n > sig->length)
        return 0;   // too many arguments for a non-vararg signature
    if (sig->length > n) {
        // only acceptable when the single missing slot is the vararg itself
        if (!(n == sig->length-1 && va))
            return 0;
    }
    size_t i;
    for(i=0; i < n; i++) {
        jl_value_t *decl = jl_tupleref(sig, i);
        if (i == sig->length-1) {
            if (va) {
                // consume all remaining arguments against the vararg element type
                jl_value_t *t = jl_tparam0(decl);
                for(; i < n; i++) {
                    if (!jl_subtype(args[i], t, 1))
                        return 0;
                }
                return 1;
            }
        }
        jl_value_t *a = args[i];
        if (jl_is_tuple(decl)) {
            // tuples don't have to match exactly, to avoid caching
            // signatures for tuples of every length
            if (!jl_subtype(a, decl, 1))
                return 0;
        }
        else if (jl_is_type_type(decl) && jl_is_nontuple_type(a)) {   //***
            if (jl_tparam0(decl) == (jl_value_t*)jl_typetype_tvar) {
                // in the case of Type{T}, the types don't have
                // to match exactly either. this is cached as Type{T}.
                // analogous to the situation with tuples.
            }
            else {
                if (a!=jl_tparam0(decl) && !jl_types_equal(a,jl_tparam0(decl)))
                    return 0;
            }
        }
        else if (decl == (jl_value_t*)jl_any_type) {
            // Any matches everything
        }
        else {
            /*
              we know there are only concrete types here, and types are
              hash-consed, so pointer comparison should work.
            */
            if ((jl_value_t*)jl_typeof(a) != decl)
                return 0;
        }
    }
    return 1;
}
// Check whether argument *types* `types` (count `n`) match cached
// signature `sig`; `va` marks a trailing vararg element.
// Returns 1 on match, 0 otherwise.
static int cache_match_by_type(jl_value_t **types, size_t n, jl_tuple_t *sig, int va)
{
    if (!va && n > sig->length)
        return 0;   // too many arguments for a non-vararg signature
    if (sig->length > n) {
        // only acceptable when the single missing slot is the vararg itself
        if (!(n == sig->length-1 && va))
            return 0;
    }
    size_t i;
    for(i=0; i < n; i++) {
        jl_value_t *decl = jl_tupleref(sig, i);
        if (i == sig->length-1) {
            if (va) {
                // consume all remaining types against the vararg element type
                jl_value_t *t = jl_tparam0(decl);
                for(; i < n; i++) {
                    if (!jl_subtype(types[i], t, 0))
                        return 0;
                }
                return 1;
            }
        }
        jl_value_t *a = types[i];
        if (jl_is_tuple(decl)) {
            // tuples don't have to match exactly, to avoid caching
            // signatures for tuples of every length
            if (!jl_subtype(a, decl, 0))
                return 0;
        }
        else if (jl_is_tag_type(a) && jl_is_tag_type(decl) &&
                 ((jl_tag_type_t*)decl)->name == jl_type_type->name &&
                 ((jl_tag_type_t*)a   )->name == jl_type_type->name) {
            // both sides are Type{...}
            if (jl_tparam0(decl) == (jl_value_t*)jl_typetype_tvar) {
                // in the case of Type{T}, the types don't have
                // to match exactly either. this is cached as Type{T}.
                // analogous to the situation with tuples.
            }
            else {
                if (!jl_types_equal(jl_tparam0(a), jl_tparam0(decl))) {
                    return 0;
                }
            }
        }
        else if (decl == (jl_value_t*)jl_any_type) {
            // Any matches everything
        }
        else {
            if (!jl_types_equal(a, decl))
                return 0;
        }
    }
    return 1;
}
// Validate `super` and install it as the supertype of `tt`.
// The supertype must be an abstract datatype (and not Undef, a Vararg,
// or a Type); otherwise an error naming the definition is raised.
// Parametric types also get their name cache cleared and their inner
// types reinstantiated against the new supertype.
void jl_set_datatype_super(jl_datatype_t *tt, jl_value_t *super)
{
    int invalid = !jl_is_datatype(super)
                  || super == (jl_value_t*)jl_undef_type
                  || !jl_is_abstracttype(super)
                  || jl_subtype(super, (jl_value_t*)jl_vararg_type, 0)
                  || jl_subtype(super, (jl_value_t*)jl_type_type, 0);
    if (invalid)
        jl_errorf("invalid subtyping in definition of %s", tt->name->name->name);
    tt->super = (jl_datatype_t*)super;
    if (jl_tuple_len(tt->parameters) > 0) {
        // cached instantiations referenced the old supertype; rebuild
        tt->name->cache = (jl_value_t*)jl_null;
        jl_reinstantiate_inner_types(tt);
    }
}
// Decide whether two datatype definitions are interchangeable: same kind,
// name, flags, layout, parameters, field types, supertype (by mutual
// subtyping), and field names. Used when "redefining" a type.
static int equiv_type(jl_datatype_t *dta, jl_datatype_t *dtb)
{
    if (jl_typeof(dta) != jl_typeof(dtb))
        return 0;
    if (dta->name->name != dtb->name->name)
        return 0;
    if (dta->abstract != dtb->abstract)
        return 0;
    if (dta->mutabl != dtb->mutabl)
        return 0;
    if (dta->size != dtb->size)
        return 0;
    if (dta->ninitialized != dtb->ninitialized)
        return 0;
    if (!equiv_svec_dt(dta->parameters, dtb->parameters))
        return 0;
    if (!equiv_svec_dt(dta->types, dtb->types))
        return 0;
    // supertypes must be equal as types, tested via mutual subtyping
    if (!jl_subtype((jl_value_t*)dta->super, (jl_value_t*)dtb->super, 0))
        return 0;
    if (!jl_subtype((jl_value_t*)dtb->super, (jl_value_t*)dta->super, 0))
        return 0;
    return jl_egal((jl_value_t*)dta->name->names, (jl_value_t*)dtb->name->names);
}
// Validate `super` and install it as the supertype of `tt`, with a GC
// write barrier. The supertype must be an abstract non-tuple datatype,
// must not be the type's own name (no self-supertype), and must not be
// a Vararg, a Type, or Builtin.
void jl_set_datatype_super(jl_datatype_t *tt, jl_value_t *super)
{
    int invalid = !jl_is_datatype(super)
                  || !jl_is_abstracttype(super)
                  || tt->name == ((jl_datatype_t*)super)->name
                  || jl_subtype(super, (jl_value_t*)jl_vararg_type, 0)
                  || jl_is_tuple_type(super)
                  || jl_subtype(super, (jl_value_t*)jl_type_type, 0)
                  || super == (jl_value_t*)jl_builtin_type;
    if (invalid)
        jl_errorf("invalid subtyping in definition of %s", jl_symbol_name(tt->name->name));
    tt->super = (jl_datatype_t*)super;
    jl_gc_wb(tt, tt->super);
}
// Store `rhs` into element `i` (0-based) of array `a`.
// Raises a type error if `rhs` is not an instance of the element type.
// Bits-type elements are copied inline (size-specialized for speed);
// everything else is stored as a boxed pointer.
void jl_arrayset(jl_array_t *a, size_t i, jl_value_t *rhs)
{
    jl_value_t *el_type = jl_tparam0(jl_typeof(a));
    if (el_type != (jl_value_t*)jl_any_type) {
        if (!jl_subtype(rhs, el_type, 1))
            jl_type_error("arrayset", el_type, rhs);
    }
    if (jl_is_bits_type(el_type)) {
        size_t nb = a->elsize;
        // common element sizes get direct assignment; others fall back to memcpy
        switch (nb) {
        case 1:
            ((int8_t*)a->data)[i]  = *(int8_t*)jl_bits_data(rhs);  break;
        case 2:
            ((int16_t*)a->data)[i] = *(int16_t*)jl_bits_data(rhs); break;
        case 4:
            ((int32_t*)a->data)[i] = *(int32_t*)jl_bits_data(rhs); break;
        case 8:
            ((int64_t*)a->data)[i] = *(int64_t*)jl_bits_data(rhs); break;
        case 16:
            ((bits128_t*)a->data)[i] = *(bits128_t*)jl_bits_data(rhs); break;
        default:
            memcpy(&((char*)a->data)[i*nb], jl_bits_data(rhs), nb);
        }
    }
    else {
        ((jl_value_t**)a->data)[i] = rhs;
    }
}
// Visit every typemap entry in ordered-dict `a` whose key could intersect
// `ty`, recursing via jl_typemap_intersection_visitor at the next offset.
// `tparam` != 0 means we are matching against Type{t} rather than t.
// Returns 0 as soon as a nested visitor aborts, 1 otherwise.
static int jl_typemap_intersection_array_visitor(struct jl_ordereddict_t *a, jl_value_t *ty, int tparam, int offs, struct typemap_intersection_env *closure)
{
    size_t i, l = jl_array_len(a->values);
    union jl_typemap_t *data = (union jl_typemap_t*)jl_array_data(a->values);
    for (i = 0; i < l; i++) {
        union jl_typemap_t ml = data[i];
        if (ml.unknown == jl_nothing)
            continue;   // empty slot in the dictionary
        jl_value_t *t;
        if (jl_typeof(ml.unknown) == (jl_value_t*)jl_typemap_level_type) {
            // interior node: its key is the split type at this level
            t = ml.node->key;
        }
        else {
            // leaf entry: field type at this offset, with UnionAll peeled first
            t = jl_field_type(jl_unwrap_unionall((jl_value_t*)ml.leaf->sig), offs);
            if (tparam)
                t = jl_tparam0(t);
        }
        // `t` is a leaftype, so intersection test becomes subtype
        if (ty == (jl_value_t*)jl_any_type || // easy case: Any always matches
            (tparam ?
             (jl_typeof(t) == ty || jl_isa(t, ty)) // (Type{t} <: ty), where is_leaf_type(t) => isa(t, ty)
             : (t == ty || jl_subtype(t, ty)))) {
            if (!jl_typemap_intersection_visitor(ml, offs + 1, closure))
                return 0;
        }
    }
    return 1;
}
// Fast matcher for argument *values* (UnionAll-aware variant): check `n`
// values against the `lensig` entries of `sig`, with `va` marking a
// trailing Vararg. Returns 1 on match, 0 otherwise.
static inline int sig_match_simple(jl_value_t **args, size_t n, jl_value_t **sig, int va, size_t lensig)
{
    // NOTE: This function is a performance hot spot!!
    size_t i;
    if (va) lensig -= 1;    // trailing Vararg is matched separately below
    for (i = 0; i < lensig; i++) {
        jl_value_t *decl = sig[i];
        jl_value_t *a = args[i];
        if (decl == (jl_value_t*)jl_any_type || ((jl_value_t*)jl_typeof(a) == decl)) {
            /*
              we are only matching concrete types here, and those types are
              hash-consed, so pointer comparison should work.
            */
            continue;
        }
        // peel a single UnionAll wrapper so `Type{T} where T` is recognized
        jl_value_t *unw = jl_is_unionall(decl) ? ((jl_unionall_t*)decl)->body : decl;
        if (jl_is_type_type(unw) && jl_is_type(a)) {
            jl_value_t *tp0 = jl_tparam0(unw);
            if (jl_is_typevar(tp0)) {
                // in the case of Type{_}, the types don't have to match exactly.
                // this is cached as `Type{T} where T`.
                if (((jl_tvar_t*)tp0)->ub != (jl_value_t*)jl_any_type &&
                    !jl_subtype(a, ((jl_tvar_t*)tp0)->ub))
                    return 0;
            }
            else {
                if (a != tp0) {
                    // cheap disqualifiers before the (expensive) jl_types_equal:
                    // different kinds, or different typenames after unwrapping
                    if (jl_typeof(a) != jl_typeof(tp0))
                        return 0;
                    jl_datatype_t *da = (jl_datatype_t*)a;
                    jl_datatype_t *dt = (jl_datatype_t*)tp0;
                    while (jl_is_unionall(da)) da = (jl_datatype_t*)((jl_unionall_t*)da)->body;
                    while (jl_is_unionall(dt)) dt = (jl_datatype_t*)((jl_unionall_t*)dt)->body;
                    if (jl_is_datatype(da) && jl_is_datatype(dt) && da->name != dt->name)
                        return 0;
                    if (!jl_types_equal(a, tp0))
                        return 0;
                }
            }
        }
        else {
            return 0;
        }
    }
    if (va) {
        jl_value_t *decl = sig[i];
        if (jl_vararg_kind(decl) == JL_VARARG_INT) {
            // Vararg{T,N}: remaining argument count must equal N exactly
            if (n-i != jl_unbox_long(jl_tparam1(decl)))
                return 0;
        }
        jl_value_t *t = jl_unwrap_vararg(decl);
        // each trailing argument value must be an instance of the element type
        for(; i < n; i++) {
            if (!jl_isa(args[i], t))
                return 0;
        }
        return 1;
    }
    return 1;
}
// Intersect concrete call-site type `a` with method signature `b`,
// filling `*penv` with the matched typevar assignments from `tvars`.
// Returns the intersection, or jl_bottom_type when the method cannot
// actually apply (including the Union{}-typevar corner case below).
jl_value_t *jl_lookup_match(jl_value_t *a, jl_value_t *b, jl_svec_t **penv, jl_svec_t *tvars)
{
    jl_value_t *ti = jl_type_intersection_matching(a, b, penv, tvars);
    if (ti == (jl_value_t*)jl_bottom_type)
        return ti;  // no intersection at all; nothing to protect or verify
    JL_GC_PUSH1(&ti);
    assert(jl_is_svec(*penv));
    int l = jl_svec_len(*penv);
    for(int i=0; i < l; i++) {
        jl_value_t *val = jl_svecref(*penv,i);
        /*
          since "a" is a concrete type, we assume that
          (a∩b != Union{}) => a<:b. However if a static parameter is
          forced to equal Union{}, then part of "b" might become Union{},
          and therefore a subtype of "a". For example
          (Type{Union{}},Int) ∩ (Type{T},T)
          issue #5254
        */
        if (val == (jl_value_t*)jl_bottom_type) {
            // re-verify the subtype assumption explicitly in this case
            if (!jl_subtype(a, ti, 0)) {
                JL_GC_POP();
                return (jl_value_t*)jl_bottom_type;
            }
        }
    }
    JL_GC_POP();
    return ti;
}
// this is a heuristic for allowing "redefining" a type to something identical static int equiv_svec_dt(jl_svec_t *sa, jl_svec_t *sb) { size_t i, l = jl_svec_len(sa); if (l != jl_svec_len(sb)) return 0; for (i = 0; i < l; i++) { jl_value_t *a = jl_svecref(sa, i); jl_value_t *b = jl_svecref(sb, i); if (jl_typeof(a) != jl_typeof(b)) return 0; if (jl_is_typevar(a) && ((jl_tvar_t*)a)->name != ((jl_tvar_t*)b)->name) return 0; if (!jl_subtype(a, b, 0) || !jl_subtype(b, a, 0)) return 0; } return 1; }
// Convert `x` to type `to` by calling the Julia-level conversion
// function `conv_f(to, x)`. Values that already have the requested type
// are returned unchanged.
static jl_value_t *convert(jl_type_t *to, jl_value_t *x, jl_function_t *conv_f)
{
    // fast path: no conversion needed
    if (jl_subtype(x, (jl_value_t*)to, 1))
        return x;
    jl_value_t *callargs[2];
    callargs[0] = (jl_value_t*)to;
    callargs[1] = x;
    return jl_apply(conv_f, callargs, 2);
}
// Validate `super` and install it as the supertype of `tt`, with a GC
// write barrier. The supertype must be an abstract non-tuple datatype,
// must not share the type's own name, and must not be a Vararg or Type.
// Parametric types additionally have their instantiation caches cleared
// and their inner types rebuilt against the new supertype.
void jl_set_datatype_super(jl_datatype_t *tt, jl_value_t *super)
{
    int invalid = !jl_is_datatype(super)
                  || !jl_is_abstracttype(super)
                  || tt->name == ((jl_datatype_t*)super)->name
                  || jl_subtype(super, (jl_value_t*)jl_vararg_type, 0)
                  || jl_is_tuple_type(super)
                  || jl_subtype(super, (jl_value_t*)jl_type_type, 0);
    if (invalid)
        jl_errorf("invalid subtyping in definition of %s", tt->name->name->name);
    tt->super = (jl_datatype_t*)super;
    jl_gc_wb(tt, tt->super);
    if (jl_svec_len(tt->parameters) > 0) {
        // cached instantiations referenced the old supertype; rebuild
        tt->name->cache = jl_emptysvec;
        tt->name->linearcache = jl_emptysvec;
        jl_reinstantiate_inner_types(tt);
    }
}
/*
  Method caches are divided into three parts: one for signatures where
  the first argument is a singleton kind (Type{Foo}), one indexed by the
  UID of the first argument's type in normal cases, and a fallback table of
  everything else.

  Note that the "primary key" is the type of the first *argument*, since
  there tends to be lots of variation there. The type of the 0th argument
  (the function) is always the same for most functions.
*/
// Walk the linked list of typemap entries starting at `ml`, returning the
// first entry whose signature matches `types` in the given `world` age
// (`max_world_mask` widens the upper bound, e.g. to include replaced
// methods). On a subtype match `*penv` receives the typevar environment;
// it is reset to empty when a candidate match is subsequently rejected.
// Returns NULL when nothing matches.
static jl_typemap_entry_t *jl_typemap_assoc_by_type_(jl_typemap_entry_t *ml, jl_value_t *types, jl_svec_t **penv, size_t world, size_t max_world_mask)
{
    jl_value_t *unw = jl_unwrap_unionall((jl_value_t*)types);
    int isua = jl_is_unionall(types);
    size_t n = jl_field_count(unw);
    int typesisva = n == 0 ? 0 : jl_is_vararg_type(jl_tparam(unw, n-1));
    for (; ml != (void*)jl_nothing; ml = ml->next) {
        if (world < ml->min_world || world > (ml->max_world | max_world_mask))
            continue; // ignore replaced methods
        size_t lensig = jl_field_count(jl_unwrap_unionall((jl_value_t*)ml->sig));
        if (lensig == n || (ml->va && lensig <= n+1)) {
            int resetenv = 0, ismatch = 1;
            // cheap pre-filter: reject via the simplified signature first
            if (ml->simplesig != (void*)jl_nothing && !isua) {
                size_t lensimplesig = jl_field_count(ml->simplesig);
                int isva = lensimplesig > 0 &&
                           jl_is_vararg_type(jl_tparam(ml->simplesig, lensimplesig - 1));
                if (lensig == n || (isva && lensimplesig <= n + 1))
                    ismatch = sig_match_by_type_simple(jl_svec_data(((jl_datatype_t*)types)->parameters), n,
                                                       ml->simplesig, lensimplesig, isva);
                else
                    ismatch = 0;
            }
            if (ismatch == 0)
                ; // nothing
            else if (ml->isleafsig && !typesisva && !isua)
                // leaf signature: pointer-equality match is sufficient
                ismatch = sig_match_by_type_leaf(jl_svec_data(((jl_datatype_t*)types)->parameters),
                                                 ml->sig, lensig);
            else if (ml->issimplesig && !typesisva && !isua)
                // structurally simple signature: avoid full subtyping
                ismatch = sig_match_by_type_simple(jl_svec_data(((jl_datatype_t*)types)->parameters), n,
                                                   ml->sig, lensig, ml->va);
            else {
                // general case: full subtyping, capturing the typevar env
                ismatch = jl_subtype_matching(types, (jl_value_t*)ml->sig, penv);
                if (ismatch && penv)
                    resetenv = 1;
            }
            if (ismatch) {
                size_t i, l;
                for (i = 0, l = jl_svec_len(ml->guardsigs); i < l; i++) {
                    // see corresponding code in jl_typemap_entry_assoc_exact
                    if (jl_subtype(types, jl_svecref(ml->guardsigs, i))) {
                        ismatch = 0;
                        break;
                    }
                }
                if (ismatch)
                    return ml;
            }
            if (resetenv)
                *penv = jl_emptysvec;   // discard env from the rejected candidate
        }
    }
    return NULL;
}
// Store `rhs` into element `i` (0-based) of array `a`.
// Raises a type error if `rhs` is not an instance of the element type.
// Bits-type elements are copied inline; others are stored boxed.
void jl_arrayset(jl_array_t *a, size_t i, jl_value_t *rhs)
{
    jl_value_t *eltype = jl_tparam0(jl_typeof(a));
    if (eltype != (jl_value_t*)jl_any_type && !jl_subtype(rhs, eltype, 1))
        jl_type_error("arrayset", eltype, rhs);
    if (!jl_is_bits_type(eltype)) {
        ((jl_value_t**)a->data)[i] = rhs;
    }
    else {
        jl_assign_bits(&((char*)a->data)[i*a->elsize], rhs);
    }
}
// Store `rhs` into element `i` (0-based) of array `a`, type-checking
// against the declared element type. Boxed (pointer) elements also get a
// generational GC write barrier on the array's owner.
JL_DLLEXPORT void jl_arrayset(jl_array_t *a, jl_value_t *rhs, size_t i)
{
    assert(i < jl_array_len(a));
    jl_value_t *eltype = jl_tparam0(jl_typeof(a));
    if (eltype != (jl_value_t*)jl_any_type && !jl_subtype(rhs, eltype, 1))
        jl_type_error("arrayset", eltype, rhs);
    if (a->flags.ptrarray) {
        ((jl_value_t**)a->data)[i] = rhs;
        jl_gc_wb(jl_array_owner(a), rhs);
    }
    else {
        jl_assign_bits(&((char*)a->data)[i*a->elsize], rhs);
    }
}
// Store `rhs` into element `i` (0-based) of array `a`, type-checking
// against the declared element type. For boxed elements the GC write
// barrier targets the data owner when the buffer is shared (how == 3),
// otherwise the array itself.
void jl_arrayset(jl_array_t *a, jl_value_t *rhs, size_t i)
{
    assert(i < jl_array_len(a));
    jl_value_t *eltype = jl_tparam0(jl_typeof(a));
    if (eltype != (jl_value_t*)jl_any_type && !jl_subtype(rhs, eltype, 1))
        jl_type_error("arrayset", eltype, rhs);
    if (a->ptrarray) {
        ((jl_value_t**)a->data)[i] = rhs;
        jl_value_t *owner = (a->how == 3) ? jl_array_data_owner(a) : (jl_value_t*)a;
        jl_gc_wb(owner, rhs);
    }
    else {
        jl_assign_bits(&((char*)a->data)[i*a->elsize], rhs);
    }
}
// ccall(pointer, rettype, (argtypes...), args...)
// Emit LLVM IR for a `ccall` expression: resolve the callee (constant
// symbol/pointer tuple or runtime pointer), interpret the return and
// argument type expressions, build the LLVM function type (with
// signext/zeroext attributes for small integers), marshal each argument
// to its native representation, and emit the call.
static Value *emit_ccall(jl_value_t **args, size_t nargs, jl_codectx_t *ctx)
{
    JL_NARGSV(ccall, 3);
    jl_value_t *ptr=NULL, *rt=NULL, *at=NULL;
    Value *jl_ptr=NULL;
    JL_GC_PUSH(&ptr, &rt, &at);
    // try to resolve the callee at compile time; fall back to a runtime pointer
    ptr = static_eval(args[1], ctx, true);
    if (ptr == NULL) {
        jl_value_t *ptr_ty = expr_type(args[1], ctx);
        Value *arg1 = emit_unboxed(args[1], ctx);
        if (!jl_is_cpointer_type(ptr_ty)) {
            emit_typecheck(arg1, (jl_value_t*)jl_voidpointer_type,
                           "ccall: function argument not a pointer or valid constant", ctx);
        }
        jl_ptr = emit_unbox(T_size, T_psize, arg1);
    }
    // evaluate the declared return type and argument-type tuple
    rt = jl_interpret_toplevel_expr_in(ctx->module, args[2],
                                       &jl_tupleref(ctx->sp,0),
                                       jl_tuple_len(ctx->sp)/2);
    if (jl_is_tuple(rt)) {
        std::string msg = "in " + ctx->funcName + ": ccall: missing return type";
        jl_error(msg.c_str());
    }
    at = jl_interpret_toplevel_expr_in(ctx->module, args[3],
                                       &jl_tupleref(ctx->sp,0),
                                       jl_tuple_len(ctx->sp)/2);
    void *fptr=NULL;
    char *f_name=NULL, *f_lib=NULL;
    if (ptr != NULL) {
        // unwrap a 1-tuple around the callee spec
        if (jl_is_tuple(ptr) && jl_tuple_len(ptr)==1) {
            ptr = jl_tupleref(ptr,0);
        }
        if (jl_is_symbol(ptr))
            f_name = ((jl_sym_t*)ptr)->name;
        else if (jl_is_byte_string(ptr))
            f_name = jl_string_data(ptr);
        if (f_name != NULL) {
            // just symbol, default to JuliaDLHandle
#ifdef __WIN32__
            fptr = jl_dlsym_e(jl_dl_handle, f_name);
            if (!fptr) {
                //TODO: when one of these succeeds, store the f_lib name (and clear fptr)
                fptr = jl_dlsym_e(jl_kernel32_handle, f_name);
                if (!fptr) {
                    fptr = jl_dlsym_e(jl_ntdll_handle, f_name);
                    if (!fptr) {
                        fptr = jl_dlsym_e(jl_crtdll_handle, f_name);
                        if (!fptr) {
                            fptr = jl_dlsym(jl_winsock_handle, f_name);
                        }
                    }
                }
            }
            else {
                // available in process symbol table
                fptr = NULL;
            }
#else
            // will look in process symbol table
#endif
        }
        else if (jl_is_cpointer_type(jl_typeof(ptr))) {
            // constant raw function pointer
            fptr = *(void**)jl_bits_data(ptr);
        }
        else if (jl_is_tuple(ptr) && jl_tuple_len(ptr)>1) {
            // (name, library) pair
            jl_value_t *t0 = jl_tupleref(ptr,0);
            jl_value_t *t1 = jl_tupleref(ptr,1);
            if (jl_is_symbol(t0))
                f_name = ((jl_sym_t*)t0)->name;
            else if (jl_is_byte_string(t0))
                f_name = jl_string_data(t0);
            else
                JL_TYPECHK(ccall, symbol, t0);
            if (jl_is_symbol(t1))
                f_lib = ((jl_sym_t*)t1)->name;
            else if (jl_is_byte_string(t1))
                f_lib = jl_string_data(t1);
            else
                JL_TYPECHK(ccall, symbol, t1);
        }
        else {
            JL_TYPECHK(ccall, pointer, ptr);
        }
    }
    if (f_name == NULL && fptr == NULL && jl_ptr == NULL) {
        JL_GC_POP();
        emit_error("ccall: null function pointer", ctx);
        return literal_pointer_val(jl_nothing);
    }
    JL_TYPECHK(ccall, type, rt);
    JL_TYPECHK(ccall, tuple, at);
    JL_TYPECHK(ccall, type, at);
    jl_tuple_t *tt = (jl_tuple_t*)at;
    std::vector<Type *> fargt(0);       // per-argument LLVM types (incl. vararg elt)
    std::vector<Type *> fargt_sig(0);   // LLVM types that go into the signature
    Type *lrt = julia_type_to_llvm(rt);
    if (lrt == NULL) {
        JL_GC_POP();
        return literal_pointer_val(jl_nothing);
    }
    size_t i;
    bool haspointers = false;
    bool isVa = false;
    size_t nargt = jl_tuple_len(tt);
    std::vector<AttributeWithIndex> attrs;
    for(i=0; i < nargt; i++) {
        jl_value_t *tti = jl_tupleref(tt,i);
        if (jl_is_seq_type(tti)) {
            isVa = true;
            tti = jl_tparam0(tti);
        }
        if (jl_is_bits_type(tti)) {
            // see pull req #978. need to annotate signext/zeroext for
            // small integer arguments.
            jl_bits_type_t *bt = (jl_bits_type_t*)tti;
            if (bt->nbits < 32) {
                if (jl_signed_type == NULL) {
                    jl_signed_type = jl_get_global(jl_core_module,jl_symbol("Signed"));
                }
#ifdef LLVM32
                Attributes::AttrVal av;
                if (jl_signed_type && jl_subtype(tti, jl_signed_type, 0))
                    av = Attributes::SExt;
                else
                    av = Attributes::ZExt;
                attrs.push_back(AttributeWithIndex::get(getGlobalContext(), i+1,
                                                        ArrayRef<Attributes::AttrVal>(&av, 1)));
#else
                Attribute::AttrConst av;
                if (jl_signed_type && jl_subtype(tti, jl_signed_type, 0))
                    av = Attribute::SExt;
                else
                    av = Attribute::ZExt;
                attrs.push_back(AttributeWithIndex::get(i+1, av));
#endif
            }
        }
        Type *t = julia_type_to_llvm(tti);
        if (t == NULL) {
            JL_GC_POP();
            return literal_pointer_val(jl_nothing);
        }
        fargt.push_back(t);
        if (!isVa)
            fargt_sig.push_back(t);
    }
    // check for calling convention specifier
    CallingConv::ID cc = CallingConv::C;
    jl_value_t *last = args[nargs];
    if (jl_is_expr(last)) {
        jl_sym_t *lhd = ((jl_expr_t*)last)->head;
        if (lhd == jl_symbol("stdcall")) {
            cc = CallingConv::X86_StdCall;
            nargs--;
        }
        else if (lhd == jl_symbol("cdecl")) {
            cc = CallingConv::C;
            nargs--;
        }
        else if (lhd == jl_symbol("fastcall")) {
            cc = CallingConv::X86_FastCall;
            nargs--;
        }
        else if (lhd == jl_symbol("thiscall")) {
            cc = CallingConv::X86_ThisCall;
            nargs--;
        }
    }
    if ((!isVa && jl_tuple_len(tt)  != (nargs-2)/2) ||
        ( isVa && jl_tuple_len(tt)-1 > (nargs-2)/2))
        jl_error("ccall: wrong number of arguments to C function");
    // some special functions
    if (fptr == &jl_array_ptr) {
        // intercept jl_array_ptr and inline it as a direct data-pointer load
        Value *ary = emit_expr(args[4], ctx);
        JL_GC_POP();
        return mark_julia_type(builder.CreateBitCast(emit_arrayptr(ary),lrt), rt);
    }
    // see if there are & arguments
    for(i=4; i < nargs+1; i+=2) {
        jl_value_t *argi = args[i];
        if (jl_is_expr(argi) && ((jl_expr_t*)argi)->head == amp_sym) {
            haspointers = true;
            break;
        }
    }
    // make LLVM function object for the target
    Value *llvmf;
    FunctionType *functype = FunctionType::get(lrt, fargt_sig, isVa);
    if (jl_ptr != NULL) {
        // runtime pointer: null-check it, then cast to a function pointer
        null_pointer_check(jl_ptr,ctx);
        Type *funcptype = PointerType::get(functype,0);
        llvmf = builder.CreateIntToPtr(jl_ptr, funcptype);
    }
    else if (fptr != NULL) {
        // compile-time constant pointer
        Type *funcptype = PointerType::get(functype,0);
        llvmf = literal_pointer_val(fptr, funcptype);
    }
    else {
        // look the symbol up by name (optionally in a named library)
        void *symaddr;
        if (f_lib != NULL)
            symaddr = add_library_sym(f_name, f_lib);
        else
            symaddr = sys::DynamicLibrary::SearchForAddressOfSymbol(f_name);
        if (symaddr == NULL) {
            JL_GC_POP();
            std::stringstream msg;
            msg << "ccall: could not find function ";
            msg << f_name;
            if (f_lib != NULL) {
                msg << " in library ";
                msg << f_lib;
            }
            emit_error(msg.str(), ctx);
            return literal_pointer_val(jl_nothing);
        }
        llvmf = jl_Module->getOrInsertFunction(f_name, functype);
    }
    // save temp argument area stack pointer
    Value *saveloc=NULL;
    Value *stacksave=NULL;
    if (haspointers) {
        // TODO: inline this
        saveloc = builder.CreateCall(save_arg_area_loc_func);
        stacksave = builder.CreateCall(Intrinsic::getDeclaration(jl_Module,
                                                                 Intrinsic::stacksave));
    }
    // emit arguments
    Value *argvals[(nargs-3)/2];
    int last_depth = ctx->argDepth;
    int nargty = jl_tuple_len(tt);
    for(i=4; i < nargs+1; i+=2) {
        int ai = (i-4)/2;
        jl_value_t *argi = args[i];
        bool addressOf = false;
        if (jl_is_expr(argi) && ((jl_expr_t*)argi)->head == amp_sym) {
            addressOf = true;
            argi = jl_exprarg(argi,0);
        }
        Type *largty;
        jl_value_t *jargty;
        if (isVa && ai >= nargty-1) {
            // past the declared arguments: reuse the vararg element type
            largty = fargt[nargty-1];
            jargty = jl_tparam0(jl_tupleref(tt,nargty-1));
        }
        else {
            largty = fargt[ai];
            jargty = jl_tupleref(tt,ai);
        }
        Value *arg;
        if (largty == jl_pvalue_llvmt) {
            arg = emit_expr(argi, ctx, true);
        }
        else {
            arg = emit_unboxed(argi, ctx);
            if (jl_is_bits_type(expr_type(argi, ctx))) {
                if (addressOf)
                    arg = emit_unbox(largty->getContainedType(0), largty, arg);
                else
                    arg = emit_unbox(largty, PointerType::get(largty,0), arg);
            }
        }
        /*
        #ifdef JL_GC_MARKSWEEP
        // make sure args are rooted
        if (largty->isPointerTy() &&
            (largty == jl_pvalue_llvmt ||
             !jl_is_bits_type(expr_type(args[i], ctx)))) {
            make_gcroot(boxed(arg), ctx);
        }
        #endif
        */
        argvals[ai] = julia_to_native(largty, jargty, arg, argi, addressOf, ai+1, ctx);
    }
    // the actual call
    Value *result = builder.CreateCall(llvmf,
                                       ArrayRef<Value*>(&argvals[0],(nargs-3)/2));
    if (cc != CallingConv::C)
        ((CallInst*)result)->setCallingConv(cc);
#ifdef LLVM32
    ((CallInst*)result)->setAttributes(AttrListPtr::get(getGlobalContext(),
                                                        ArrayRef<AttributeWithIndex>(attrs)));
#else
    ((CallInst*)result)->setAttributes(AttrListPtr::get(attrs.data(),attrs.size()));
#endif
    // restore temp argument area stack pointer
    if (haspointers) {
        assert(saveloc != NULL);
        builder.CreateCall(restore_arg_area_loc_func, saveloc);
        assert(stacksave != NULL);
        builder.CreateCall(Intrinsic::getDeclaration(jl_Module,
                                                     Intrinsic::stackrestore),
                           stacksave);
    }
    ctx->argDepth = last_depth;
    if (0) { // Enable this to turn on SSPREQ (-fstack-protector) on the function containing this ccall
#ifdef LLVM32
        ctx->f->addFnAttr(Attributes::StackProtectReq);
#else
        ctx->f->addFnAttr(Attribute::StackProtectReq);
#endif
    }
    JL_GC_POP();
    if (lrt == T_void)
        return literal_pointer_val((jl_value_t*)jl_nothing);
    return mark_julia_type(result, rt);
}
// ccall(pointer, rettype, (argtypes...), args...)
// Emit LLVM IR for a `ccall` expression: resolve the callee (by name/library,
// raw pointer, or runtime pointer value), convert each Julia argument to its
// native representation, emit the call with the requested calling convention,
// and convert the native result back to a Julia value.
// NOTE(review): args[4], args[6], ... appear to be the actual argument
// expressions (the i+=2 loop skips interleaved entries) — confirm against the
// lowering that produces ccall Exprs.
static Value *emit_ccall(jl_value_t **args, size_t nargs, jl_codectx_t *ctx)
{
    JL_NARGSV(ccall, 3);
    jl_value_t *rt=NULL, *at=NULL;
    JL_GC_PUSH2(&rt, &at);
    // Decode the callee specifier: exactly one of jl_ptr (runtime pointer
    // value), fptr (compile-time C pointer), or f_name[/f_lib] will be set.
    native_sym_arg_t symarg = interpret_symbol_arg(args[1], ctx, "ccall");
    Value *jl_ptr=NULL;
    void *fptr = NULL;
    char *f_name = NULL, *f_lib = NULL;
    jl_ptr = symarg.jl_ptr;
    fptr = symarg.fptr;
    f_name = symarg.f_name;
    f_lib = symarg.f_lib;
    if (f_name == NULL && fptr == NULL && jl_ptr == NULL) {
        // interpret_symbol_arg could not make sense of the callee
        JL_GC_POP();
        emit_error("ccall: null function pointer", ctx);
        return literal_pointer_val(jl_nothing);
    }
    // Evaluate the declared return type (args[2]) and argument-type tuple
    // (args[3]) at compile time, in the scope of the static parameters.
    rt = jl_interpret_toplevel_expr_in(ctx->module, args[2],
                                       &jl_tupleref(ctx->sp,0),
                                       jl_tuple_len(ctx->sp)/2);
    if (jl_is_tuple(rt)) {
        // a tuple here means the user wrote ccall(f, (argtypes...), ...)
        // and forgot the return type
        std::string msg = "in " + ctx->funcName +
            ": ccall: missing return type";
        jl_error(msg.c_str());
    }
    if (rt == (jl_value_t*)jl_pointer_type)
        jl_error("ccall: return type Ptr should have an element type, Ptr{T}");
    at = jl_interpret_toplevel_expr_in(ctx->module, args[3],
                                       &jl_tupleref(ctx->sp,0),
                                       jl_tuple_len(ctx->sp)/2);
    JL_TYPECHK(ccall, type, rt);
    JL_TYPECHK(ccall, tuple, at);
    JL_TYPECHK(ccall, type, at);
    jl_tuple_t *tt = (jl_tuple_t*)at;
    // fargt: one LLVM type per declared argument (including the Vararg
    // element type); fargt_sig: the fixed-arity part used in the signature.
    std::vector<Type *> fargt(0);
    std::vector<Type *> fargt_sig(0);
    Type *lrt = julia_struct_to_llvm(rt);
    if (lrt == NULL) {
        JL_GC_POP();
        emit_error("ccall: return type doesn't correspond to a C type", ctx);
        return literal_pointer_val(jl_nothing);
    }
    size_t i;
    bool isVa = false;
    size_t nargt = jl_tuple_len(tt);
    std::vector<AttributeWithIndex> attrs;
    for(i=0; i < nargt; i++) {
        jl_value_t *tti = jl_tupleref(tt,i);
        if (tti == (jl_value_t*)jl_pointer_type)
            jl_error("ccall: argument type Ptr should have an element type, Ptr{T}");
        if (jl_is_vararg_type(tti)) {
            // T... : remember varargs-ness, continue with the element type
            isVa = true;
            tti = jl_tparam0(tti);
        }
        if (jl_is_bitstype(tti)) {
            // see pull req #978. need to annotate signext/zeroext for
            // small integer arguments.
            jl_datatype_t *bt = (jl_datatype_t*)tti;
            if (bt->size < 4) {
                // lazily resolve Core.Signed to decide sext vs zext
                if (jl_signed_type == NULL) {
                    jl_signed_type = jl_get_global(jl_core_module,jl_symbol("Signed"));
                }
#ifdef LLVM32
                Attributes::AttrVal av;
                if (jl_signed_type && jl_subtype(tti, jl_signed_type, 0))
                    av = Attributes::SExt;
                else
                    av = Attributes::ZExt;
                // attribute indices are 1-based (0 is the return value)
                attrs.push_back(AttributeWithIndex::get(getGlobalContext(), i+1,
                                                        ArrayRef<Attributes::AttrVal>(&av, 1)));
#else
                Attribute::AttrConst av;
                if (jl_signed_type && jl_subtype(tti, jl_signed_type, 0))
                    av = Attribute::SExt;
                else
                    av = Attribute::ZExt;
                attrs.push_back(AttributeWithIndex::get(i+1, av));
#endif
            }
        }
        Type *t = julia_struct_to_llvm(tti);
        if (t == NULL) {
            JL_GC_POP();
            std::stringstream msg;
            msg << "ccall: the type of argument ";
            msg << i+1;
            msg << " doesn't correspond to a C type";
            emit_error(msg.str(), ctx);
            return literal_pointer_val(jl_nothing);
        }
        fargt.push_back(t);
        // the Vararg element type itself is not part of the fixed signature
        if (!isVa)
            fargt_sig.push_back(t);
    }
    // check for calling convention specifier
    // (an optional trailing :stdcall/:cdecl/:fastcall/:thiscall expression;
    // consuming it shrinks nargs so the count check below stays correct)
    CallingConv::ID cc = CallingConv::C;
    jl_value_t *last = args[nargs];
    if (jl_is_expr(last)) {
        jl_sym_t *lhd = ((jl_expr_t*)last)->head;
        if (lhd == jl_symbol("stdcall")) {
            cc = CallingConv::X86_StdCall;
            nargs--;
        }
        else if (lhd == jl_symbol("cdecl")) {
            cc = CallingConv::C;
            nargs--;
        }
        else if (lhd == jl_symbol("fastcall")) {
            cc = CallingConv::X86_FastCall;
            nargs--;
        }
        else if (lhd == jl_symbol("thiscall")) {
            cc = CallingConv::X86_ThisCall;
            nargs--;
        }
    }
    if ((!isVa && jl_tuple_len(tt)  != (nargs-2)/2) ||
        ( isVa && jl_tuple_len(tt)-1 > (nargs-2)/2))
        jl_error("ccall: wrong number of arguments to C function");

    // some special functions
    // (compile certain runtime callees to direct pointer arithmetic
    // instead of an actual call)
    if (fptr == &jl_array_ptr) {
        assert(lrt->isPointerTy());
        Value *ary = emit_expr(args[4], ctx);
        JL_GC_POP();
        return mark_julia_type(builder.CreateBitCast(emit_arrayptr(ary),lrt), rt);
    }
    if (fptr == &jl_value_ptr) {
        assert(lrt->isPointerTy());
        jl_value_t *argi = args[4];
        bool addressOf = false;
        if (jl_is_expr(argi) && ((jl_expr_t*)argi)->head == amp_sym) {
            // &x form: take the address one word past the tag
            addressOf = true;
            argi = jl_exprarg(argi,0);
        }
        Value *ary = boxed(emit_expr(argi, ctx));
        JL_GC_POP();
        return mark_julia_type(
            builder.CreateBitCast(emit_nthptr_addr(ary, addressOf?1:0),lrt),
            rt);
    }

    // make LLVM function object for the target
    Value *llvmf;
    FunctionType *functype = FunctionType::get(lrt, fargt_sig, isVa);
    if (jl_ptr != NULL) {
        // runtime pointer value: null-check, then cast int -> function ptr
        null_pointer_check(jl_ptr,ctx);
        Type *funcptype = PointerType::get(functype,0);
        llvmf = builder.CreateIntToPtr(jl_ptr, funcptype);
    }
    else if (fptr != NULL) {
        // compile-time constant pointer
        Type *funcptype = PointerType::get(functype,0);
        llvmf = literal_pointer_val(fptr, funcptype);
    }
    else {
        // symbol lookup: verify it resolves now so we can report a good
        // error at compile time, then let the module reference it by name
        void *symaddr;
        if (f_lib != NULL)
            symaddr = add_library_sym(f_name, f_lib);
        else
            symaddr = sys::DynamicLibrary::SearchForAddressOfSymbol(f_name);
        if (symaddr == NULL) {
            JL_GC_POP();
            std::stringstream msg;
            msg << "ccall: could not find function ";
            msg << f_name;
            if (f_lib != NULL) {
                msg << " in library ";
                msg << f_lib;
            }
            emit_error(msg.str(), ctx);
            return literal_pointer_val(jl_nothing);
        }
        llvmf = jl_Module->getOrInsertFunction(f_name, functype);
    }

    // save place before arguments, for possible insertion of temp arg
    // area saving code.
    // (we don't know until after emitting the arguments whether any of them
    // needed temporary native storage, so remember the current insertion
    // point and splice the save/stacksave calls in retroactively)
    Value *saveloc=NULL;
    Value *stacksave=NULL;
    BasicBlock::InstListType &instList = builder.GetInsertBlock()->getInstList();
    Instruction *savespot;
    if (instList.empty()) {
        savespot = NULL;
    }
    else {
        // hey C++, there's this thing called pointers...
        Instruction &_savespot = builder.GetInsertBlock()->back();
        savespot = &_savespot;
    }

    // emit arguments
    // NOTE(review): this is a GNU VLA in C++; fine under gcc/clang but
    // non-standard.
    Value *argvals[(nargs-3)/2];
    int last_depth = ctx->argDepth;
    int nargty = jl_tuple_len(tt);
    bool needTempSpace = false;
    for(i=4; i < nargs+1; i+=2) {
        int ai = (i-4)/2;
        jl_value_t *argi = args[i];
        bool addressOf = false;
        if (jl_is_expr(argi) && ((jl_expr_t*)argi)->head == amp_sym) {
            addressOf = true;
            argi = jl_exprarg(argi,0);
        }
        Type *largty;
        jl_value_t *jargty;
        if (isVa && ai >= nargty-1) {
            // past the fixed part: every extra argument uses the Vararg
            // element type
            largty = fargt[nargty-1];
            jargty = jl_tparam0(jl_tupleref(tt,nargty-1));
        }
        else {
            largty = fargt[ai];
            jargty = jl_tupleref(tt,ai);
        }
        Value *arg;
        if (largty == jl_pvalue_llvmt || largty->isStructTy()) {
            // boxed Julia value (or by-value struct) needed
            arg = emit_expr(argi, ctx, true);
        }
        else {
            arg = emit_unboxed(argi, ctx);
            if (jl_is_bitstype(expr_type(argi, ctx))) {
                if (addressOf)
                    // &x: unbox to the pointee type so we can pass its address
                    arg = emit_unbox(largty->getContainedType(0), largty, arg);
                else
                    arg = emit_unbox(largty, PointerType::get(largty,0), arg);
            }
        }
        /*
#ifdef JL_GC_MARKSWEEP
        // make sure args are rooted
        if (largty->isPointerTy() &&
            (largty == jl_pvalue_llvmt ||
             !jl_is_bits_type(expr_type(args[i], ctx)))) {
            make_gcroot(boxed(arg), ctx);
        }
#endif
        */
        bool mightNeed=false;
        argvals[ai] = julia_to_native(largty, jargty, arg, argi, addressOf,
                                      ai+1, ctx, &mightNeed);
        // track whether any conversion used the temporary argument area
        needTempSpace |= mightNeed;
    }
    if (needTempSpace) {
        // save temp argument area stack pointer
        // TODO: inline this
        // (built detached with CallInst::Create, then spliced in *before*
        // the argument conversions recorded above)
        saveloc = CallInst::Create(save_arg_area_loc_func);
        stacksave = CallInst::Create(Intrinsic::getDeclaration(jl_Module,
                                                               Intrinsic::stacksave));
        if (savespot)
            instList.insertAfter(savespot, (Instruction*)saveloc);
        else
            instList.push_front((Instruction*)saveloc);
        instList.insertAfter((Instruction*)saveloc, (Instruction*)stacksave);
    }
    // the actual call
    Value *result = builder.CreateCall(llvmf,
                                       ArrayRef<Value*>(&argvals[0],(nargs-3)/2));
    if (cc != CallingConv::C)
        ((CallInst*)result)->setCallingConv(cc);
#ifdef LLVM32
    ((CallInst*)result)->setAttributes(AttrListPtr::get(getGlobalContext(),
                                                        ArrayRef<AttributeWithIndex>(attrs)));
#else
    ((CallInst*)result)->setAttributes(AttrListPtr::get(attrs.data(),attrs.size()));
#endif
    if (needTempSpace) {
        // restore temp argument area stack pointer
        assert(saveloc != NULL);
        builder.CreateCall(restore_arg_area_loc_func, saveloc);
        assert(stacksave != NULL);
        builder.CreateCall(Intrinsic::getDeclaration(jl_Module,
                                                     Intrinsic::stackrestore),
                           stacksave);
    }
    ctx->argDepth = last_depth;
    if (0) { // Enable this to turn on SSPREQ (-fstack-protector) on the function containing this ccall
#ifdef LLVM32
        ctx->f->addFnAttr(Attributes::StackProtectReq);
#else
        ctx->f->addFnAttr(Attribute::StackProtectReq);
#endif
    }
    JL_GC_POP();
    if (lrt == T_void)
        return literal_pointer_val((jl_value_t*)jl_nothing);
    if (lrt->isStructTy()) {
        //fprintf(stderr, "ccall rt: %s -> %s\n", f_name, ((jl_tag_type_t*)rt)->name->name->name);
        // struct return: allocate a boxed object (tag word + payload) and
        // store the native struct into its payload slot
        assert(jl_is_structtype(rt));
        Value *strct =
            builder.CreateCall(jlallocobj_func,
                               ConstantInt::get(T_size,
                                                sizeof(void*)+((jl_datatype_t*)rt)->size));
        builder.CreateStore(literal_pointer_val((jl_value_t*)rt),
                            emit_nthptr_addr(strct, (size_t)0));
        builder.CreateStore(result,
                            builder.CreateBitCast(
                                emit_nthptr_addr(strct, (size_t)1),
                                PointerType::get(lrt,0)));
        return mark_julia_type(strct, rt);
    }
    return mark_julia_type(result, rt);
}
DLLEXPORT void jl_typeassert(jl_value_t *x, jl_value_t *t) { if (!jl_subtype(x,t,1)) jl_type_error("typeassert", t, x); }
/*
  Method caches are divided into three parts: one for signatures where
  the first argument is a singleton kind (Type{Foo}), one indexed by the
  UID of the first argument's type in normal cases, and a fallback table of
  everything else.

  Note that the "primary key" is the type of the first *argument*, since
  there tends to be lots of variation there. The type of the 0th argument
  (the function) is always the same for most functions.
*/
// Linear search of a typemap-entry list for an entry whose signature
// matches `types`. Returns the entry, INEXACT_ENTRY (when `inexact` is set
// and the match is inconclusive), or NULL when nothing matches.
// On a successful tvar-binding match, *penv receives the typevar bindings;
// on a rejected match it is reset to jl_emptysvec.
static jl_typemap_entry_t *jl_typemap_assoc_by_type_(jl_typemap_entry_t *ml, jl_tupletype_t *types,
                                                     int8_t inexact, jl_svec_t **penv)
{
    size_t n = jl_field_count(types);
    while (ml != (void*)jl_nothing) {
        size_t lensig = jl_field_count(ml->sig);
        // arity must agree (allowing a trailing Vararg to absorb extras)
        if (lensig == n || (ml->va && lensig <= n+1)) {
            int resetenv = 0, ismatch = 1;
            if (ml->simplesig != (void*)jl_nothing) {
                // cheap pre-filter against the simplified signature
                size_t lensimplesig = jl_field_count(ml->simplesig);
                int isva = lensimplesig > 0 &&
                    jl_is_vararg_type(jl_tparam(ml->simplesig, lensimplesig - 1));
                // NOTE(review): this compares `lensig` (arity of ml->sig),
                // not `lensimplesig`, against n — verify that is intended
                // and not a typo for lensimplesig.
                if (lensig == n || (isva && lensimplesig <= n + 1))
                    ismatch = sig_match_by_type_simple(jl_svec_data(types->parameters), n,
                                                      ml->simplesig, lensimplesig, isva);
                else
                    ismatch = 0;
            }
            // dispatch to the cheapest applicable full test
            if (ismatch == 0)
                ; // nothing
            else if (ml->isleafsig)
                ismatch = sig_match_by_type_leaf(jl_svec_data(types->parameters),
                                                 ml->sig, lensig);
            else if (ml->issimplesig)
                ismatch = sig_match_by_type_simple(jl_svec_data(types->parameters), n,
                                                   ml->sig, lensig, ml->va);
            else if (ml->tvars == jl_emptysvec)
                ismatch = jl_tuple_subtype(jl_svec_data(types->parameters), n, ml->sig, 0);
            else if (penv == NULL) {
                // caller doesn't need typevar bindings: a match test suffices
                ismatch = jl_type_match((jl_value_t*)types, (jl_value_t*)ml->sig) != (jl_value_t*)jl_false;
            }
            else {
                // TODO: this is missing the actual subtype test,
                // which works currently because types is typically a leaf tt,
                // or inexact is set (which then does a sort of subtype test via jl_types_equal)
                // but this isn't entirely general
                jl_value_t *ti = jl_lookup_match((jl_value_t*)types, (jl_value_t*)ml->sig, penv, ml->tvars);
                resetenv = 1;
                ismatch = (ti != (jl_value_t*)jl_bottom_type);
                if (ismatch) {
                    // parametric methods only match if all typevars are matched by
                    // non-typevars.
                    size_t i, l;
                    for (i = 0, l = jl_svec_len(*penv); i < l; i++) {
                        if (jl_is_typevar(jl_svecref(*penv, i))) {
                            if (inexact) {
                                // "inexact" means the given type is compile-time,
                                // where a failure to determine the value of a
                                // static parameter is inconclusive.
                                // this is issue #3182, see test/core.jl
                                return INEXACT_ENTRY;
                            }
                            ismatch = 0;
                            break;
                        }
                    }
                    if (inexact) {
                        // the compiler might attempt jl_get_specialization on e.g.
                        // convert(::Type{Type{Int}}, ::DataType), which is concrete but might not
                        // equal the run time type. in this case ti would be {Type{Type{Int}}, Type{Int}}
                        // but tt would be {Type{Type{Int}}, DataType}.
                        JL_GC_PUSH1(&ti);
                        ismatch = jl_types_equal(ti, (jl_value_t*)types);
                        JL_GC_POP();
                        if (!ismatch)
                            return INEXACT_ENTRY;
                    }
                }
            }
            if (ismatch) {
                size_t i, l;
                for (i = 0, l = jl_svec_len(ml->guardsigs); i < l; i++) {
                    // see corresponding code in jl_typemap_assoc_exact
                    // (guard entries veto matches that a more specific
                    // definition should receive)
                    if (jl_subtype((jl_value_t*)types, jl_svecref(ml->guardsigs, i), 0)) {
                        ismatch = 0;
                        break;
                    }
                }
                if (ismatch)
                    return ml;
            }
            if (resetenv)
                // discard partially-populated typevar bindings from the
                // failed candidate before trying the next entry
                *penv = jl_emptysvec;
        }
        ml = ml->next;
    }
    return NULL;
}
// Insert a specialized (compiled-against-`type`) copy of `method` into the
// method table cache `mt`. `type` is the (mutated in place) tuple of actual
// argument types, `decl` is the declared signature, and `sparams` the
// inferred static-parameter bindings. The loop below applies a series of
// heuristics that *widen* `type` before caching, so one cache entry can
// serve many similar calls without losing correctness.
static jl_function_t *cache_method(jl_methtable_t *mt, jl_tuple_t *type,
                                   jl_function_t *method, jl_tuple_t *decl,
                                   jl_tuple_t *sparams)
{
    size_t i;
    int need_dummy_entries = 0;
    jl_value_t *temp=NULL;
    jl_function_t *newmeth=NULL;
    JL_GC_PUSH(&type, &temp, &newmeth);

    // --- widen each argument slot of `type` according to the declaration ---
    for (i=0; i < type->length; i++) {
        jl_value_t *elt = jl_tupleref(type,i);
        int set_to_any = 0;
        if (nth_slot_type(decl,i) == jl_ANY_flag) {
            // don't specialize on slots marked ANY
            temp = jl_tupleref(type, i);
            jl_tupleset(type, i, (jl_value_t*)jl_any_type);
            int nintr=0;
            jl_methlist_t *curr = mt->defs;
            // if this method is the only match even with the current slot
            // set to Any, then it is safe to cache it that way.
            while (curr != NULL && curr->func!=method) {
                if (jl_type_intersection((jl_value_t*)curr->sig,
                                         (jl_value_t*)type) !=
                    (jl_value_t*)jl_bottom_type) {
                    nintr++;
                    break;
                }
                curr = curr->next;
            }
            if (nintr) {
                // TODO: even if different specializations of this slot need
                // separate cache entries, have them share code.
                jl_tupleset(type, i, temp);  // restore the precise type
            }
            else {
                set_to_any = 1;
            }
        }
        if (set_to_any) {
            // already widened to Any above; nothing more to do for this slot
        }
        else if (jl_is_tuple(elt)) {
            /*
              don't cache tuple type exactly; just remember that it was
              a tuple, unless the declaration asks for something more
              specific. determined with a type intersection.
            */
            int might_need_dummy=0;
            temp = jl_tupleref(type, i);
            if (i < decl->length) {
                jl_value_t *declt = jl_tupleref(decl,i);
                // for T..., intersect with T
                if (jl_is_seq_type(declt))
                    declt = jl_tparam0(declt);
                if (declt == (jl_value_t*)jl_tuple_type ||
                    jl_subtype((jl_value_t*)jl_tuple_type, declt, 0)) {
                    // don't specialize args that matched (Any...) or Any
                    jl_tupleset(type, i, (jl_value_t*)jl_tuple_type);
                    might_need_dummy = 1;
                }
                else {
                    declt = jl_type_intersection(declt,
                                                 (jl_value_t*)jl_tuple_type);
                    // only widen long or all-Any tuples; short specific
                    // tuples are kept exact
                    if (((jl_tuple_t*)elt)->length > 3 ||
                        tuple_all_Any((jl_tuple_t*)declt)) {
                        jl_tupleset(type, i, declt);
                        might_need_dummy = 1;
                    }
                }
            }
            else {
                jl_tupleset(type, i, (jl_value_t*)jl_tuple_type);
                might_need_dummy = 1;
            }
            assert(jl_tupleref(type,i) != (jl_value_t*)jl_bottom_type);
            if (might_need_dummy) {
                jl_methlist_t *curr = mt->defs;
                // can't generalize type if there's an overlapping definition
                // with typevars
                while (curr != NULL && curr->func!=method) {
                    if (curr->tvars!=jl_null &&
                        jl_type_intersection((jl_value_t*)curr->sig,
                                             (jl_value_t*)type) !=
                        (jl_value_t*)jl_bottom_type) {
                        jl_tupleset(type, i, temp);  // undo the widening
                        might_need_dummy = 0;
                        break;
                    }
                    curr = curr->next;
                }
            }
            if (might_need_dummy) {
                // another definition constrains this slot with a tuple type:
                // guard entries will be needed to catch mis-cached overlaps
                jl_methlist_t *curr = mt->defs;
                while (curr != NULL && curr->func!=method) {
                    jl_tuple_t *sig = curr->sig;
                    if (sig->length > i &&
                        jl_is_tuple(jl_tupleref(sig,i))) {
                        need_dummy_entries = 1;
                        break;
                    }
                    curr = curr->next;
                }
            }
        }
        else if (jl_is_type_type(elt) && jl_is_type_type(jl_tparam0(elt))) {
            /*
              actual argument was Type{...}, we computed its type as
              Type{Type{...}}. we must avoid unbounded nesting here, so
              cache the signature as Type{T}, unless something more
              specific like Type{Type{Int32}} was actually declared.
              this can be determined using a type intersection.
            */
            if (i < decl->length) {
                jl_value_t *declt = jl_tupleref(decl,i);
                // for T..., intersect with T
                if (jl_is_seq_type(declt))
                    declt = jl_tparam0(declt);
                jl_tupleset(type, i,
                            jl_type_intersection(declt,
                                                 (jl_value_t*)jl_typetype_type));
            }
            else {
                jl_tupleset(type, i, (jl_value_t*)jl_typetype_type);
            }
            assert(jl_tupleref(type,i) != (jl_value_t*)jl_bottom_type);
        }
        else if (jl_is_type_type(elt) &&
                 very_general_type(nth_slot_type(decl,i))) {
            /*
              here's a fairly complex heuristic: if this argument slot's
              declared type is Any, and no definition overlaps with Type
              for this slot, then don't specialize for every Type that
              might be passed.
              Since every type x has its own type Type{x}, this would be
              excessive specialization for an Any slot.
            */
            int ok=1;
            jl_methlist_t *curr = mt->defs;
            while (curr != NULL) {
                jl_value_t *slottype = nth_slot_type(curr->sig, i);
                if (slottype &&
                    !very_general_type(slottype) &&
                    jl_type_intersection(slottype,
                                         (jl_value_t*)jl_type_type) !=
                    (jl_value_t*)jl_bottom_type) {
                    ok=0;
                    break;
                }
                curr = curr->next;
            }
            if (ok) {
                jl_tupleset(type, i, (jl_value_t*)jl_typetype_type);
            }
        }
    }

    // for varargs methods, only specialize up to max_args.
    // in general, here we want to find the biggest type that's not a
    // supertype of any other method signatures. so far we are conservative
    // and the types we find should be bigger.
    if (type->length > jl_unbox_long(mt->max_args) &&
        jl_is_seq_type(jl_tupleref(decl,decl->length-1))) {
        size_t nspec = jl_unbox_long(mt->max_args)+2;
        jl_tuple_t *limited = jl_alloc_tuple(nspec);
        for(i=0; i < nspec-1; i++) {
            jl_tupleset(limited, i, jl_tupleref(type, i));
        }
        jl_value_t *lasttype = jl_tupleref(type,i-1);
        // if all subsequent arguments are subtypes of lasttype, specialize
        // on that instead of decl. for example, if decl is
        // (Any...)
        // and type is
        // (Symbol, Symbol, Symbol)
        // then specialize as (Symbol...), but if type is
        // (Symbol, Int32, Expr)
        // then specialize as (Any...)
        size_t j = i;
        int all_are_subtypes=1;
        for(; j < type->length; j++) {
            if (!jl_subtype(jl_tupleref(type,j), lasttype, 0)) {
                all_are_subtypes = 0;
                break;
            }
        }
        type = limited;
        if (all_are_subtypes) {
            // avoid Type{Type{...}...}...
            if (jl_is_type_type(lasttype))
                lasttype = (jl_value_t*)jl_type_type;
            temp = (jl_value_t*)jl_tuple1(lasttype);
            jl_tupleset(type, i, jl_apply_type((jl_value_t*)jl_seq_type,
                                               (jl_tuple_t*)temp));
        }
        else {
            // fall back to the declared trailing Vararg type, instantiated
            // with the static parameters
            jl_value_t *lastdeclt = jl_tupleref(decl,decl->length-1);
            if (sparams->length > 0) {
                lastdeclt = (jl_value_t*)
                    jl_instantiate_type_with((jl_type_t*)lastdeclt,
                                             sparams->data,
                                             sparams->length/2);
            }
            jl_tupleset(type, i, lastdeclt);
        }
        // now there is a problem: the computed signature is more
        // general than just the given arguments, so it might conflict
        // with another definition that doesn't have cache instances yet.
        // to fix this, we insert dummy cache entries for all intersections
        // of this signature and definitions. those dummy entries will
        // supersede this one in conflicted cases, alerting us that there
        // should actually be a cache miss.
        need_dummy_entries = 1;
    }

    if (need_dummy_entries) {
        // guard entries (NULL function) for every other definition that
        // intersects the widened signature
        temp = ml_matches(mt->defs, (jl_value_t*)type, lambda_sym, -1);
        for(i=0; i < jl_array_len(temp); i++) {
            jl_value_t *m = jl_cellref(temp, i);
            if (jl_tupleref(m,2) != (jl_value_t*)method->linfo) {
                jl_method_cache_insert(mt, (jl_tuple_t*)jl_tupleref(m, 0),
                                       NULL);
            }
        }
    }

    // here we infer types and specialize the method
    /*
    if (sparams==jl_null)
        newmeth = method;
    else
    */
    jl_array_t *lilist=NULL;
    jl_lambda_info_t *li=NULL;
    if (method->linfo && method->linfo->specializations!=NULL) {
        // reuse code already generated for this combination of lambda and
        // arguments types. this happens for inner generic functions where
        // a new closure is generated on each call to the enclosing function.
        lilist = method->linfo->specializations;
        int k;
        for(k=0; k < lilist->length; k++) {
            li = (jl_lambda_info_t*)jl_cellref(lilist, k);
            if (jl_types_equal(li->specTypes, (jl_value_t*)type))
                break;
        }
        if (k == lilist->length) lilist=NULL;
    }
    if (lilist != NULL && !li->inInference) {
        assert(li);
        newmeth = jl_reinstantiate_method(method, li);
        (void)jl_method_cache_insert(mt, type, newmeth);
        JL_GC_POP();
        return newmeth;
    }
    else {
        newmeth = jl_instantiate_method(method, sparams);
    }
    /*
      if "method" itself can ever be compiled, for example for use as
      an unspecialized method (see below), then newmeth->fptr might point
      to some slow compiled code instead of jl_trampoline, meaning our
      type-inferred code would never get compiled. this can be fixed with
      the commented-out snippet below.
    */
    assert(!(newmeth->linfo && newmeth->linfo->ast) ||
           newmeth->fptr == &jl_trampoline);
    /*
    if (newmeth->linfo&&newmeth->linfo->ast&&newmeth->fptr!=&jl_trampoline) {
        newmeth->fptr = &jl_trampoline;
    }
    */
    (void)jl_method_cache_insert(mt, type, newmeth);

    if (newmeth->linfo != NULL && newmeth->linfo->sparams == jl_null) {
        // when there are no static parameters, one unspecialized version
        // of a function can be shared among all cached specializations.
        if (method->linfo->unspecialized == NULL) {
            method->linfo->unspecialized =
                jl_instantiate_method(method, jl_null);
        }
        newmeth->linfo->unspecialized = method->linfo->unspecialized;
    }

    if (newmeth->linfo != NULL && newmeth->linfo->ast != NULL) {
        // record the specialization on the original method and run type
        // inference on the new copy
        newmeth->linfo->specTypes = (jl_value_t*)type;
        jl_array_t *spe = method->linfo->specializations;
        if (spe == NULL) {
            spe = jl_alloc_cell_1d(1);
            jl_cellset(spe, 0, newmeth->linfo);
        }
        else {
            jl_cell_1d_push(spe, (jl_value_t*)newmeth->linfo);
        }
        method->linfo->specializations = spe;
        jl_type_infer(newmeth->linfo, type, method->linfo);
    }
    JL_GC_POP();
    return newmeth;
}