// this is a heuristic for allowing "redefining" a type to something identical static int equiv_type(jl_datatype_t *dta, jl_datatype_t *dtb) { if (!(jl_typeof(dta) == jl_typeof(dtb) && dta->name->name == dtb->name->name && dta->abstract == dtb->abstract && dta->mutabl == dtb->mutabl && dta->size == dtb->size && dta->ninitialized == dtb->ninitialized && jl_egal((jl_value_t*)dta->name->names, (jl_value_t*)dtb->name->names) && jl_nparams(dta) == jl_nparams(dtb) && jl_field_count(dta) == jl_field_count(dtb))) return 0; jl_value_t *a=NULL, *b=NULL; int ok = 1; size_t i, nf = jl_field_count(dta); JL_GC_PUSH2(&a, &b); a = jl_rewrap_unionall((jl_value_t*)dta->super, dta->name->wrapper); b = jl_rewrap_unionall((jl_value_t*)dtb->super, dtb->name->wrapper); if (!jl_types_equal(a, b)) goto no; JL_TRY { a = jl_apply_type(dtb->name->wrapper, jl_svec_data(dta->parameters), jl_nparams(dta)); } JL_CATCH { ok = 0; } if (!ok) goto no; assert(jl_is_datatype(a)); a = dta->name->wrapper; b = dtb->name->wrapper; while (jl_is_unionall(a)) { jl_unionall_t *ua = (jl_unionall_t*)a; jl_unionall_t *ub = (jl_unionall_t*)b; if (!jl_egal(ua->var->lb, ub->var->lb) || !jl_egal(ua->var->ub, ub->var->ub) || ua->var->name != ub->var->name) goto no; a = jl_instantiate_unionall(ua, (jl_value_t*)ub->var); b = ub->body; } assert(jl_is_datatype(a) && jl_is_datatype(b)); for (i=0; i < nf; i++) { jl_value_t *ta = jl_svecref(((jl_datatype_t*)a)->types, i); jl_value_t *tb = jl_svecref(((jl_datatype_t*)b)->types, i); if (jl_has_free_typevars(ta)) { if (!jl_has_free_typevars(tb) || !jl_egal(ta, tb)) goto no; } else if (jl_has_free_typevars(tb) || jl_typeof(ta) != jl_typeof(tb) || !jl_types_equal(ta, tb)) { goto no; } } JL_GC_POP(); return 1; no: JL_GC_POP(); return 0; }
// Cheap structural match of argument types `types` (length n) against the
// declared signature `sig` (length lensig). `va` is nonzero when the last
// element of `sig` is a Vararg, which is handled after the fixed slots.
// Returns 1 on match, 0 otherwise.
// NOTE(review): assumes the caller has already validated that n is compatible
// with lensig/va — confirm at call sites.
static int sig_match_by_type_simple(jl_value_t **types, size_t n, jl_tupletype_t *sig, size_t lensig, int va)
{
    size_t i;
    if (va) lensig -= 1;   // fixed slots only; the Vararg tail is done below
    for (i = 0; i < lensig; i++) {
        jl_value_t *decl = jl_field_type(sig, i);
        jl_value_t *a = types[i];
        if (jl_is_type_type(decl)) {
            jl_value_t *tp0 = jl_tparam0(decl);
            if (jl_is_type_type(a)) {
                if (tp0 == (jl_value_t*)jl_typetype_tvar) {
                    // in the case of Type{T}, the types don't have
                    // to match exactly either. this is cached as Type{T}.
                    // analogous to the situation with tuples.
                }
                else if (jl_is_typevar(tp0)) {
                    // Type{T<:ub}: the given Type parameter must be within ub
                    if (!jl_subtype(jl_tparam0(a), ((jl_tvar_t*)tp0)->ub, 0))
                        return 0;
                }
                else {
                    // concrete Type{X}: parameters must be equal types
                    if (!jl_types_equal(jl_tparam0(a), tp0))
                        return 0;
                }
            }
            else if (!is_kind(a) || !jl_is_typevar(tp0) ||
                     ((jl_tvar_t*)tp0)->ub != (jl_value_t*)jl_any_type) {
                // manually unroll jl_subtype(a, decl)
                // where `a` can be a subtype like TypeConstructor
                // and decl is Type{T}
                return 0;
            }
        }
        else if (decl == (jl_value_t*)jl_any_type) {
            // Any matches anything; nothing to check
        }
        else {
            if (jl_is_type_type(a)) // decl is not Type, because it would be caught above
                a = jl_typeof(jl_tparam0(a));
            if (!jl_types_equal(a, decl))
                return 0;
        }
    }
    if (va) {
        jl_value_t *decl = jl_field_type(sig, i);
        if (jl_vararg_kind(decl) == JL_VARARG_INT) {
            // Vararg{T,N} with fixed N: remaining argument count must equal N
            if (n-i != jl_unbox_long(jl_tparam1(decl)))
                return 0;
        }
        // every trailing argument type must be a subtype of the Vararg element type
        jl_value_t *t = jl_tparam0(decl);
        for(; i < n; i++) {
            if (!jl_subtype(types[i], t, 0))
                return 0;
        }
        return 1;
    }
    return 1;
}
// Cheap structural match of argument types `types` (length n) against the
// declared signature `sig` (length lensig); unionall-aware variant.
// `va` is nonzero when the last element of `sig` is a Vararg.
// Returns 1 on match, 0 otherwise.
static int sig_match_by_type_simple(jl_value_t **types, size_t n, jl_tupletype_t *sig, size_t lensig, int va)
{
    size_t i;
    if (va) lensig -= 1;   // fixed slots only; Vararg tail handled below
    for (i = 0; i < lensig; i++) {
        jl_value_t *decl = jl_field_type(sig, i);
        jl_value_t *a = types[i];
        // peel one UnionAll wrapper to see whether decl is `Type{...} where ...`
        jl_value_t *unw = jl_is_unionall(decl) ? ((jl_unionall_t*)decl)->body : decl;
        if (jl_is_type_type(unw)) {
            jl_value_t *tp0 = jl_tparam0(unw);
            if (jl_is_type_type(a)) {
                if (jl_is_typevar(tp0)) {
                    // in the case of Type{_}, the types don't have to match exactly.
                    // this is cached as `Type{T} where T`.
                    if (((jl_tvar_t*)tp0)->ub != (jl_value_t*)jl_any_type &&
                        !jl_subtype(jl_tparam0(a), ((jl_tvar_t*)tp0)->ub))
                        return 0;
                }
                else {
                    // concrete Type{X}: require same kind and equal types
                    if (!(jl_typeof(jl_tparam0(a)) == jl_typeof(tp0) &&
                          jl_types_equal(jl_tparam0(a), tp0)))
                        return 0;
                }
            }
            else if (!jl_is_kind(a) || !jl_is_typevar(tp0) ||
                     ((jl_tvar_t*)tp0)->ub != (jl_value_t*)jl_any_type) {
                // manually unroll jl_subtype(a, decl)
                // where `a` can be a subtype and decl is Type{T}
                return 0;
            }
        }
        else if (decl == (jl_value_t*)jl_any_type) {
            // Any matches anything; nothing to check
        }
        else {
            if (jl_is_type_type(a)) // decl is not Type, because it would be caught above
                a = jl_typeof(jl_tparam0(a));
            if (!jl_types_equal(a, decl))
                return 0;
        }
    }
    if (va) {
        jl_value_t *decl = jl_unwrap_unionall(jl_field_type(sig, i));
        if (jl_vararg_kind(decl) == JL_VARARG_INT) {
            // Vararg{T,N} with fixed N: remaining argument count must equal N
            if (n-i != jl_unbox_long(jl_tparam1(decl)))
                return 0;
        }
        jl_value_t *t = jl_tparam0(decl);
        if (jl_is_typevar(t))
            t = ((jl_tvar_t*)t)->ub;   // compare against the typevar's upper bound
        for(; i < n; i++) {
            if (!jl_subtype(types[i], t))
                return 0;
        }
        return 1;
    }
    return 1;
}
// Match argument types `types` (length n) against a cached signature tuple
// `sig`. `va` is nonzero when the signature ends with a sequence (vararg)
// element. Returns 1 on match, 0 otherwise.
static int cache_match_by_type(jl_value_t **types, size_t n, jl_tuple_t *sig, int va)
{
    // too many arguments for a non-vararg signature
    if (!va && n > sig->length)
        return 0;
    // too few arguments: only allowed for a vararg signature matching 0 args
    if (sig->length > n) {
        if (!(n == sig->length-1 && va))
            return 0;
    }
    size_t i;
    for(i=0; i < n; i++) {
        jl_value_t *decl = jl_tupleref(sig, i);
        if (i == sig->length-1) {
            if (va) {
                // vararg tail: all remaining args must be subtypes of the
                // sequence element type, then we are done
                jl_value_t *t = jl_tparam0(decl);
                for(; i < n; i++) {
                    if (!jl_subtype(types[i], t, 0))
                        return 0;
                }
                return 1;
            }
            // NOTE(review): when !va this falls through to the regular
            // per-slot checks below — confirm that is the intent.
        }
        jl_value_t *a = types[i];
        if (jl_is_tuple(decl)) {
            // tuples don't have to match exactly, to avoid caching
            // signatures for tuples of every length
            if (!jl_subtype(a, decl, 0))
                return 0;
        }
        else if (jl_is_tag_type(a) && jl_is_tag_type(decl) &&
                 ((jl_tag_type_t*)decl)->name == jl_type_type->name &&
                 ((jl_tag_type_t*)a   )->name == jl_type_type->name) {
            if (jl_tparam0(decl) == (jl_value_t*)jl_typetype_tvar) {
                // in the case of Type{T}, the types don't have
                // to match exactly either. this is cached as Type{T}.
                // analogous to the situation with tuples.
            }
            else {
                if (!jl_types_equal(jl_tparam0(a), jl_tparam0(decl))) {
                    return 0;
                }
            }
        }
        else if (decl == (jl_value_t*)jl_any_type) {
            // Any matches anything
        }
        else {
            if (!jl_types_equal(a, decl))
                return 0;
        }
    }
    return 1;
}
// Decide whether argument signature `a` is more specific than `b`.
// Starts from the structural jl_type_morespecific answer and then applies
// several heuristic corrections for signatures containing typevars.
// Returns a boolean (1 = a is more specific).
JL_DLLEXPORT int jl_args_morespecific(jl_value_t *a, jl_value_t *b)
{
    int msp = jl_type_morespecific(a,b);   // baseline structural answer
    int btv = jl_has_typevars(b);
    if (btv) {
        // b has typevars: if a cannot even match b's pattern, fall back to msp
        // (unless a itself has typevars, in which case declare "not more specific")
        if (jl_type_match_morespecific(a,b) == (jl_value_t*)jl_false) {
            if (jl_has_typevars(a))
                return 0;
            return msp;
        }
        if (jl_has_typevars(a)) {
            // temporarily disable invariance during matching; restored below
            type_match_invariance_mask = 0;
            //int result = jl_type_match_morespecific(b,a) == (jl_value_t*)jl_false);
            // this rule seems to work better:
            int result = jl_type_match(b,a) == (jl_value_t*)jl_false;
            type_match_invariance_mask = 1;
            if (result)
                return 1;
        }
        // ambiguous in both directions => neither is more specific
        int nmsp = jl_type_morespecific(b,a);
        if (nmsp == msp)
            return 0;
    }
    if (jl_has_typevars((jl_value_t*)a)) {
        int nmsp = jl_type_morespecific(b,a);
        if (nmsp && msp)
            return 1;
        // equal ground types count as "more specific" here
        if (!btv && jl_types_equal(a,b))
            return 1;
        if (jl_type_match_morespecific(b,a) != (jl_value_t*)jl_false)
            return 0;
    }
    return msp;
}
// Scan a typemap entry list for an entry whose signature is equal (as a type)
// to `types`, restricted to entries valid in `world`. Returns the matching
// entry, or NULL when none is found.
static jl_typemap_entry_t *jl_typemap_lookup_by_type_(jl_typemap_entry_t *ml, jl_value_t *types, size_t world, size_t max_world_mask)
{
    for (; ml != (void*)jl_nothing; ml = ml->next) {
        // skip entries not valid in this world (max_world_mask can widen the range)
        if (world < ml->min_world || world > (ml->max_world | max_world_mask))
            continue;
        // cheap fast-reject before the expensive equality test:
        // compare arity and the first few parameters structurally
        jl_value_t *query = jl_unwrap_unionall(types);
        jl_value_t *entry = jl_unwrap_unionall((jl_value_t*)ml->sig);
        size_t nquery = jl_nparams(query);
        size_t nentry = jl_nparams(entry);
        int query_va = nquery > 0 && jl_is_vararg_type(jl_tparam(query, nquery - 1));
        int entry_va = nentry > 0 && jl_is_vararg_type(jl_tparam(entry, nentry - 1));
        // with no varargs on either side, arities must agree exactly
        if (!query_va && !entry_va && nquery != nentry)
            continue;
        // compare up to the first three non-vararg parameters of each side
        size_t fixed_q = nquery - query_va;
        size_t fixed_e = nentry - entry_va;
        size_t ncheck = fixed_q < fixed_e ? fixed_q : fixed_e;
        if (ncheck > 3)
            ncheck = 3;
        int reject = 0;
        for (size_t k = 0; k < ncheck; k++) {
            if (jl_obviously_unequal(jl_tparam(query, k), jl_tparam(entry, k))) {
                reject = 1;
                break;
            }
        }
        if (reject)
            continue;
        // full (expensive) type-equality test
        if (jl_types_equal((jl_value_t*)types, (jl_value_t*)ml->sig))
            return ml;
    }
    return NULL;
}
// Match runtime argument *values* `args` (length n) against signature types
// `sig` (length lensig); unionall-aware variant. `va` is nonzero when the last
// signature element is a Vararg. Returns 1 on match, 0 otherwise.
static inline int sig_match_simple(jl_value_t **args, size_t n, jl_value_t **sig,
                                   int va, size_t lensig)
{
    // NOTE: This function is a performance hot spot!!
    size_t i;
    if (va) lensig -= 1;   // fixed slots only; Vararg tail handled below
    for (i = 0; i < lensig; i++) {
        jl_value_t *decl = sig[i];
        jl_value_t *a = args[i];
        if (decl == (jl_value_t*)jl_any_type || ((jl_value_t*)jl_typeof(a) == decl)) {
            /*
              we are only matching concrete types here, and those types are
              hash-consed, so pointer comparison should work.
            */
            continue;
        }
        // peel one UnionAll wrapper to see whether decl is `Type{...} where ...`
        jl_value_t *unw = jl_is_unionall(decl) ? ((jl_unionall_t*)decl)->body : decl;
        if (jl_is_type_type(unw) && jl_is_type(a)) {
            jl_value_t *tp0 = jl_tparam0(unw);
            if (jl_is_typevar(tp0)) {
                // in the case of Type{_}, the types don't have to match exactly.
                // this is cached as `Type{T} where T`.
                if (((jl_tvar_t*)tp0)->ub != (jl_value_t*)jl_any_type &&
                    !jl_subtype(a, ((jl_tvar_t*)tp0)->ub))
                    return 0;
            }
            else {
                if (a != tp0) {
                    // cheap rejects before the full equality test:
                    // same kind of type object, and (after unwrapping
                    // unionalls) the same datatype name
                    if (jl_typeof(a) != jl_typeof(tp0))
                        return 0;
                    jl_datatype_t *da = (jl_datatype_t*)a;
                    jl_datatype_t *dt = (jl_datatype_t*)tp0;
                    while (jl_is_unionall(da)) da = (jl_datatype_t*)((jl_unionall_t*)da)->body;
                    while (jl_is_unionall(dt)) dt = (jl_datatype_t*)((jl_unionall_t*)dt)->body;
                    if (jl_is_datatype(da) && jl_is_datatype(dt) && da->name != dt->name)
                        return 0;
                    if (!jl_types_equal(a, tp0))
                        return 0;
                }
            }
        }
        else {
            return 0;
        }
    }
    if (va) {
        jl_value_t *decl = sig[i];
        if (jl_vararg_kind(decl) == JL_VARARG_INT) {
            // Vararg{T,N} with fixed N: remaining argument count must equal N
            if (n-i != jl_unbox_long(jl_tparam1(decl)))
                return 0;
        }
        // every trailing argument value must be an instance of the element type
        jl_value_t *t = jl_unwrap_vararg(decl);
        for(; i < n; i++) {
            if (!jl_isa(args[i], t))
                return 0;
        }
        return 1;
    }
    return 1;
}
// Equality test for method signatures: signatures involving typevars need
// the generic comparison; ground signatures can use the plain type equality.
static int sigs_eq(jl_value_t *a, jl_value_t *b)
{
    int has_vars = jl_has_typevars(a) || jl_has_typevars(b);
    return has_vars ? jl_types_equal_generic(a, b)
                    : jl_types_equal(a, b);
}
// Match runtime argument *values* `args` (length n) against signature types
// `sig` (length lensig); older variant using the 3-argument jl_subtype API.
// `va` is nonzero when the last signature element is a Vararg.
// Returns 1 on match, 0 otherwise.
static inline int sig_match_simple(jl_value_t **args, size_t n, jl_value_t **sig,
                                   int va, size_t lensig)
{
    // NOTE: This function is a performance hot spot!!
    size_t i;
    if (va) lensig -= 1;   // fixed slots only; Vararg tail handled below
    for (i = 0; i < lensig; i++) {
        jl_value_t *decl = sig[i];
        jl_value_t *a = args[i];
        if (decl == (jl_value_t*)jl_any_type) {
            // Any matches anything
        }
        else if ((jl_value_t*)jl_typeof(a) == decl) {
            /*
              we are only matching concrete types here, and those types are
              hash-consed, so pointer comparison should work.
            */
        }
        else if (jl_is_type_type(decl) && jl_is_type(a)) {
            jl_value_t *tp0 = jl_tparam0(decl);
            if (tp0 == (jl_value_t*)jl_typetype_tvar) {
                // in the case of Type{T}, the types don't have
                // to match exactly either. this is cached as Type{T}.
                // analogous to the situation with tuples.
            }
            else if (jl_is_typevar(tp0)) {
                // Type{T<:ub}: the given type value must be within ub
                if (!jl_subtype(a, ((jl_tvar_t*)tp0)->ub, 0))
                    return 0;
            }
            else {
                // concrete Type{X}: pointer fast path, then full equality
                if (a!=tp0 && !jl_types_equal(a,tp0))
                    return 0;
            }
        }
        else {
            return 0;
        }
    }
    if (va) {
        jl_value_t *decl = sig[i];
        if (jl_vararg_kind(decl) == JL_VARARG_INT) {
            // Vararg{T,N} with fixed N: remaining argument count must equal N
            if (n-i != jl_unbox_long(jl_tparam1(decl)))
                return 0;
        }
        // trailing args: value-vs-type subtyping (third arg 1 = value mode)
        jl_value_t *t = jl_tparam0(decl);
        for(; i < n; i++) {
            if (!jl_subtype(args[i], t, 1))
                return 0;
        }
        return 1;
    }
    return 1;
}
// Match runtime argument values `args` (length n) against a cached signature
// tuple `sig`. `va` is nonzero when the signature ends with a sequence
// (vararg) element. Returns 1 on match, 0 otherwise.
static inline int cache_match(jl_value_t **args, size_t n, jl_tuple_t *sig, int va)
{
    // too few arguments: only a one-short count can possibly match
    // NOTE(review): unlike cache_match_by_type, `va` is not consulted here —
    // presumably the caller guarantees it; confirm at call sites.
    if (sig->length > n) {
        if (n != sig->length-1)
            return 0;
    }
    size_t i;
    for(i=0; i < n; i++) {
        jl_value_t *decl = jl_tupleref(sig, i);
        if (i == sig->length-1) {
            if (va) {
                // vararg tail: all remaining args must be instances of the
                // sequence element type (third arg 1 = value mode)
                jl_value_t *t = jl_tparam0(decl);
                for(; i < n; i++) {
                    if (!jl_subtype(args[i], t, 1))
                        return 0;
                }
                return 1;
            }
        }
        jl_value_t *a = args[i];
        if (jl_is_tuple(decl)) {
            // tuples don't have to match exactly, to avoid caching
            // signatures for tuples of every length
            if (!jl_subtype(a, decl, 1))
                return 0;
        }
        else if (jl_is_type_type(decl) && jl_is_nontuple_type(a)) {   //***
            if (jl_tparam0(decl) == (jl_value_t*)jl_typetype_tvar) {
                // in the case of Type{T}, the types don't have
                // to match exactly either. this is cached as Type{T}.
                // analogous to the situation with tuples.
            }
            else {
                if (a!=jl_tparam0(decl) && !jl_types_equal(a,jl_tparam0(decl)))
                    return 0;
            }
        }
        else if (decl == (jl_value_t*)jl_any_type) {
            // Any matches anything
        }
        else {
            /*
              we know there are only concrete types here, and types are
              hash-consed, so pointer comparison should work.
            */
            if ((jl_value_t*)jl_typeof(a) != decl)
                return 0;
        }
    }
    return 1;
}
// Exact match of n argument types against a leaf (fully-concrete) signature.
// Returns 1 when every slot's type equals the declared type, 0 otherwise.
static int sig_match_by_type_leaf(jl_value_t **types, jl_tupletype_t *sig, size_t n)
{
    for (size_t slot = 0; slot < n; slot++) {
        jl_value_t *decl = jl_field_type(sig, slot);
        jl_value_t *given = types[slot];
        // decl is not Type, because it wouldn't be leafsig;
        // compare against the type of the Type-parameter instead
        if (jl_is_type_type(given))
            given = jl_typeof(jl_tparam0(given));
        if (!jl_types_equal(given, decl))
            return 0;
    }
    return 1;
}
/*
  Method caches are divided into three parts: one for signatures where
  the first argument is a singleton kind (Type{Foo}), one indexed by the
  UID of the first argument's type in normal cases, and a fallback
  table of everything else.

  Note that the "primary key" is the type of the first *argument*, since
  there tends to be lots of variation there. The type of the 0th argument
  (the function) is always the same for most functions.
*/
// Walk a typemap entry list looking for an entry matching the argument-type
// tuple `types`. `inexact` enables the compile-time ("might not equal the run
// time type") matching mode that can return INEXACT_ENTRY. When `penv` is
// non-NULL, the typevar environment of a parametric match is stored there.
// Returns the matching entry, INEXACT_ENTRY, or NULL.
static jl_typemap_entry_t *jl_typemap_assoc_by_type_(jl_typemap_entry_t *ml, jl_tupletype_t *types,
                                                     int8_t inexact, jl_svec_t **penv)
{
    size_t n = jl_field_count(types);
    while (ml != (void*)jl_nothing) {
        size_t lensig = jl_field_count(ml->sig);
        // arity must match, or the entry has a vararg that can absorb extras
        if (lensig == n || (ml->va && lensig <= n+1)) {
            int resetenv = 0, ismatch = 1;
            if (ml->simplesig != (void*)jl_nothing) {
                // fast pre-filter against the simplified signature
                size_t lensimplesig = jl_field_count(ml->simplesig);
                int isva = lensimplesig > 0 &&
                           jl_is_vararg_type(jl_tparam(ml->simplesig, lensimplesig - 1));
                // NOTE(review): the first disjunct tests `lensig`, not
                // `lensimplesig` — confirm against upstream whether this is
                // intentional or a typo.
                if (lensig == n || (isva && lensimplesig <= n + 1))
                    ismatch = sig_match_by_type_simple(jl_svec_data(types->parameters), n,
                                                       ml->simplesig, lensimplesig, isva);
                else
                    ismatch = 0;
            }
            if (ismatch == 0)
                ; // nothing
            else if (ml->isleafsig)
                // fully-concrete signature: exact per-slot equality
                ismatch = sig_match_by_type_leaf(jl_svec_data(types->parameters),
                                                 ml->sig, lensig);
            else if (ml->issimplesig)
                // simple signature: cheap structural match
                ismatch = sig_match_by_type_simple(jl_svec_data(types->parameters), n,
                                                   ml->sig, lensig, ml->va);
            else if (ml->tvars == jl_emptysvec)
                // no typevars: plain tuple subtyping
                ismatch = jl_tuple_subtype(jl_svec_data(types->parameters), n, ml->sig, 0);
            else if (penv == NULL) {
                // parametric, but caller doesn't need the environment
                ismatch = jl_type_match((jl_value_t*)types,
                                        (jl_value_t*)ml->sig) != (jl_value_t*)jl_false;
            }
            else {
                // TODO: this is missing the actual subtype test,
                // which works currently because types is typically a leaf tt,
                // or inexact is set (which then does a sort of subtype test via jl_types_equal)
                // but this isn't entirely general
                jl_value_t *ti = jl_lookup_match((jl_value_t*)types, (jl_value_t*)ml->sig,
                                                 penv, ml->tvars);
                resetenv = 1;   // *penv now holds bindings; clear on mismatch below
                ismatch = (ti != (jl_value_t*)jl_bottom_type);
                if (ismatch) {
                    // parametric methods only match if all typevars are matched by
                    // non-typevars.
                    size_t i, l;
                    for (i = 0, l = jl_svec_len(*penv); i < l; i++) {
                        if (jl_is_typevar(jl_svecref(*penv, i))) {
                            if (inexact) {
                                // "inexact" means the given type is compile-time,
                                // where a failure to determine the value of a
                                // static parameter is inconclusive.
                                // this is issue #3182, see test/core.jl
                                return INEXACT_ENTRY;
                            }
                            ismatch = 0;
                            break;
                        }
                    }
                    if (inexact) {
                        // the compiler might attempt jl_get_specialization on e.g.
                        // convert(::Type{Type{Int}}, ::DataType), which is concrete but might not
                        // equal the run time type. in this case ti would be {Type{Type{Int}}, Type{Int}}
                        // but tt would be {Type{Type{Int}}, DataType}.
                        JL_GC_PUSH1(&ti);
                        ismatch = jl_types_equal(ti, (jl_value_t*)types);
                        JL_GC_POP();
                        if (!ismatch)
                            return INEXACT_ENTRY;
                    }
                }
            }
            if (ismatch) {
                // guard signatures veto the match
                size_t i, l;
                for (i = 0, l = jl_svec_len(ml->guardsigs); i < l; i++) {
                    // see corresponding code in jl_typemap_assoc_exact
                    if (jl_subtype((jl_value_t*)types, jl_svecref(ml->guardsigs, i), 0)) {
                        ismatch = 0;
                        break;
                    }
                }
                if (ismatch)
                    return ml;
            }
            if (resetenv)
                *penv = jl_emptysvec;   // discard stale typevar bindings
        }
        ml = ml->next;
    }
    return NULL;
}
// Create a new array of type `atype` that shares the data buffer of `data`,
// reinterpreted with the dimensions in `_dims` (an ntuple of Int). Marks both
// arrays as shared; the new array's owner points at the original buffer owner.
// Throws on alignment-incompatible reinterpretation or dimension overflow.
JL_DLLEXPORT jl_array_t *jl_reshape_array(jl_value_t *atype, jl_array_t *data,
                                          jl_value_t *_dims)
{
    jl_ptls_t ptls = jl_get_ptls_states();
    jl_array_t *a;
    size_t ndims = jl_nfields(_dims);
    assert(is_ntuple_long(_dims));
    size_t *dims = (size_t*)_dims;   // ntuple of Int is laid out as raw size_t words
    assert(jl_types_equal(jl_tparam0(jl_typeof(data)), jl_tparam0(atype)));

    int ndimwords = jl_array_ndimwords(ndims);
    int tsz = JL_ARRAY_ALIGN(sizeof(jl_array_t) + ndimwords * sizeof(size_t) + sizeof(void*), JL_SMALL_BYTE_ALIGNMENT);
    a = (jl_array_t*)jl_gc_alloc(ptls, tsz, atype);
    // No allocation or safepoint allowed after this
    a->flags.pooled = tsz <= GC_MAX_SZCLASS;
    a->flags.ndims = ndims;
    a->offset = 0;
    a->data = NULL;
    a->flags.isaligned = data->flags.isaligned;
    jl_array_t *owner = (jl_array_t*)jl_array_owner(data);
    jl_value_t *eltype = jl_tparam0(atype);
    size_t elsz = 0, align = 0;
    int isboxed = !jl_islayout_inline(eltype, &elsz, &align);
    assert(isboxed == data->flags.ptrarray);
    if (!isboxed) {
        a->elsize = elsz;
        // reinterpreting to a more-aligned element type over a buffer
        // allocated for a less-aligned one is not allowed
        jl_value_t *ownerty = jl_typeof(owner);
        size_t oldelsz = 0, oldalign = 0;
        if (ownerty == (jl_value_t*)jl_string_type) {
            oldalign = 1;   // string buffers are only byte-aligned
        }
        else {
            jl_islayout_inline(jl_tparam0(ownerty), &oldelsz, &oldalign);
        }
        if (oldalign < align)
            jl_exceptionf(jl_argumenterror_type,
                          "reinterpret from alignment %d bytes to alignment %d bytes not allowed",
                          (int) oldalign, (int) align);
        a->flags.ptrarray = 0;
    }
    else {
        a->elsize = sizeof(void*);
        a->flags.ptrarray = 1;
    }

    // if data is itself a shared wrapper,
    // owner should point back to the original array
    jl_array_data_owner(a) = (jl_value_t*)owner;

    a->flags.how = 3;   // how==3: data owned by another array
    a->data = data->data;
    a->flags.isshared = 1;
    data->flags.isshared = 1;

    if (ndims == 1) {
        size_t l = dims[0];
#ifdef STORE_ARRAY_LEN
        a->length = l;
#endif
        a->nrows = l;
        a->maxsize = l;
    }
    else {
        // copy the dims and compute the total length, checking for overflow
        size_t *adims = &a->nrows;
        size_t l = 1;
        wideint_t prod;
        for (size_t i = 0; i < ndims; i++) {
            adims[i] = dims[i];
            prod = (wideint_t)l * (wideint_t)adims[i];
            if (prod > (wideint_t) MAXINTVAL)
                jl_error("invalid Array dimensions");
            l = prod;
        }
#ifdef STORE_ARRAY_LEN
        a->length = l;
#endif
    }

    return a;
}
// Insert `newrec` into the sorted typemap list `*pml`, keeping the list
// ordered by specificity (more-specific signatures first). `parent` is the GC
// parent of the list head, used for write barriers. If the new signature
// contains Union types, entries after the insertion point are re-sorted,
// since they may now compare as more specific than the new entry.
static void jl_typemap_list_insert_sorted(jl_typemap_entry_t **pml, jl_value_t *parent,
                                          jl_typemap_entry_t *newrec,
                                          const struct jl_typemap_info *tparams)
{
    jl_typemap_entry_t *l, **pl;
    pl = pml;
    l = *pml;
    jl_value_t *pa = parent;   // GC parent of the slot *pl
    // find the insertion point: the first non-leafsig entry that newrec is
    // more specific than
    while (l != (void*)jl_nothing) {
        if (!l->isleafsig) { // quickly ignore all of the leafsig entries (these were handled by caller)
            if (jl_type_morespecific((jl_value_t*)newrec->sig, (jl_value_t*)l->sig)) {
                if (l->simplesig == (void*)jl_nothing ||
                    newrec->simplesig != (void*)jl_nothing ||
                    !jl_types_equal((jl_value_t*)l->sig, (jl_value_t*)newrec->sig)) {
                    // might need to insert multiple entries for a lookup differing only by their simplesig
                    // when simplesig contains a kind
                    // TODO: make this test more correct or figure out a better way to compute this
                    break;
                }
            }
        }
        pl = &l->next;
        pa = (jl_value_t*)l;
        l = l->next;
    }

    // splice newrec in before l
    JL_SIGATOMIC_BEGIN();
    newrec->next = l;
    jl_gc_wb(newrec, l);
    *pl = newrec;
    jl_gc_wb(pa, newrec);
    // if this contains Union types, methods after it might actually be
    // more specific than it. we need to re-sort them.
    if (has_unions((jl_value_t*)newrec->sig)) {
        jl_value_t *item_parent = (jl_value_t*)newrec;
        jl_value_t *next_parent = 0;
        jl_typemap_entry_t *item = newrec->next, *next;
        jl_typemap_entry_t **pitem = &newrec->next, **pnext;
        // for each entry after newrec, scan the prefix of the list up to
        // newrec's position and re-insert the entry earlier if it is more
        // specific than something there
        while (item != (void*)jl_nothing) {
            pl = pml;
            l = *pml;
            pa = parent;
            next = item->next;
            pnext = &item->next;
            next_parent = (jl_value_t*)item;
            while (l != newrec->next) {
                if (jl_type_morespecific((jl_value_t*)item->sig, (jl_value_t*)l->sig)) {
                    // reinsert item earlier in the list
                    *pitem = next;
                    jl_gc_wb(item_parent, next);
                    item->next = l;
                    jl_gc_wb(item, item->next);
                    *pl = item;
                    jl_gc_wb(pa, item);
                    pnext = pitem;
                    next_parent = item_parent;
                    break;
                }
                pl = &l->next;
                pa = (jl_value_t*)l;
                l = l->next;
            }
            item = next;
            pitem = pnext;
            item_parent = next_parent;
        }
    }
    JL_SIGATOMIC_END();
    return;
}
// Insert a specialization of `method` into the method cache of `mt` for the
// argument-type tuple `type`. `decl` is the method's declared signature and
// `sparams` the matched static parameters. Applies several heuristics that
// widen `type` (tuple slots, Type{...} slots, ANY slots, vararg truncation)
// to limit over-specialization, possibly inserting "dummy" cache entries to
// keep the widened entry from shadowing other definitions. Returns the newly
// cached (possibly reused) specialized function.
static jl_function_t *cache_method(jl_methtable_t *mt, jl_tuple_t *type,
                                   jl_function_t *method, jl_tuple_t *decl,
                                   jl_tuple_t *sparams)
{
    size_t i;
    int need_dummy_entries = 0;
    jl_value_t *temp=NULL;
    jl_function_t *newmeth=NULL;
    JL_GC_PUSH(&type, &temp, &newmeth);   // root across the allocating calls below
    // --- per-slot widening heuristics ---
    for (i=0; i < type->length; i++) {
        jl_value_t *elt = jl_tupleref(type,i);
        int set_to_any = 0;
        if (nth_slot_type(decl,i) == jl_ANY_flag) {
            // don't specialize on slots marked ANY
            temp = jl_tupleref(type, i);
            jl_tupleset(type, i, (jl_value_t*)jl_any_type);
            int nintr=0;
            jl_methlist_t *curr = mt->defs;
            // if this method is the only match even with the current slot
            // set to Any, then it is safe to cache it that way.
            while (curr != NULL && curr->func!=method) {
                if (jl_type_intersection((jl_value_t*)curr->sig, (jl_value_t*)type) !=
                    (jl_value_t*)jl_bottom_type) {
                    nintr++;
                    break;
                }
                curr = curr->next;
            }
            if (nintr) {
                // TODO: even if different specializations of this slot need
                // separate cache entries, have them share code.
                jl_tupleset(type, i, temp);   // restore the original slot type
            }
            else {
                set_to_any = 1;
            }
        }
        if (set_to_any) {
            // already widened to Any above; skip the other heuristics
        }
        else if (jl_is_tuple(elt)) {
            /*
              don't cache tuple type exactly; just remember that it was
              a tuple, unless the declaration asks for something more
              specific. determined with a type intersection.
            */
            int might_need_dummy=0;
            temp = jl_tupleref(type, i);
            if (i < decl->length) {
                jl_value_t *declt = jl_tupleref(decl,i);
                // for T..., intersect with T
                if (jl_is_seq_type(declt))
                    declt = jl_tparam0(declt);
                if (declt == (jl_value_t*)jl_tuple_type ||
                    jl_subtype((jl_value_t*)jl_tuple_type, declt, 0)) {
                    // don't specialize args that matched (Any...) or Any
                    jl_tupleset(type, i, (jl_value_t*)jl_tuple_type);
                    might_need_dummy = 1;
                }
                else {
                    declt = jl_type_intersection(declt, (jl_value_t*)jl_tuple_type);
                    if (((jl_tuple_t*)elt)->length > 3 ||
                        tuple_all_Any((jl_tuple_t*)declt)) {
                        jl_tupleset(type, i, declt);
                        might_need_dummy = 1;
                    }
                }
            }
            else {
                jl_tupleset(type, i, (jl_value_t*)jl_tuple_type);
                might_need_dummy = 1;
            }
            assert(jl_tupleref(type,i) != (jl_value_t*)jl_bottom_type);
            if (might_need_dummy) {
                jl_methlist_t *curr = mt->defs;
                // can't generalize type if there's an overlapping definition
                // with typevars
                while (curr != NULL && curr->func!=method) {
                    if (curr->tvars!=jl_null &&
                        jl_type_intersection((jl_value_t*)curr->sig, (jl_value_t*)type) !=
                        (jl_value_t*)jl_bottom_type) {
                        jl_tupleset(type, i, temp);   // revert the widening
                        might_need_dummy = 0;
                        break;
                    }
                    curr = curr->next;
                }
            }
            if (might_need_dummy) {
                // a prior definition also constrains this slot to a tuple:
                // the widened entry could shadow it, so plan dummy entries
                jl_methlist_t *curr = mt->defs;
                while (curr != NULL && curr->func!=method) {
                    jl_tuple_t *sig = curr->sig;
                    if (sig->length > i && jl_is_tuple(jl_tupleref(sig,i))) {
                        need_dummy_entries = 1;
                        break;
                    }
                    curr = curr->next;
                }
            }
        }
        else if (jl_is_type_type(elt) && jl_is_type_type(jl_tparam0(elt))) {
            /*
              actual argument was Type{...}, we computed its type as
              Type{Type{...}}. we must avoid unbounded nesting here, so
              cache the signature as Type{T}, unless something more
              specific like Type{Type{Int32}} was actually declared.
              this can be determined using a type intersection.
            */
            if (i < decl->length) {
                jl_value_t *declt = jl_tupleref(decl,i);
                // for T..., intersect with T
                if (jl_is_seq_type(declt))
                    declt = jl_tparam0(declt);
                jl_tupleset(type, i,
                            jl_type_intersection(declt, (jl_value_t*)jl_typetype_type));
            }
            else {
                jl_tupleset(type, i, (jl_value_t*)jl_typetype_type);
            }
            assert(jl_tupleref(type,i) != (jl_value_t*)jl_bottom_type);
        }
        else if (jl_is_type_type(elt) &&
                 very_general_type(nth_slot_type(decl,i))) {
            /*
              here's a fairly complex heuristic: if this argument slot's
              declared type is Any, and no definition overlaps with Type
              for this slot, then don't specialize for every Type that
              might be passed.
              Since every type x has its own type Type{x}, this would be
              excessive specialization for an Any slot.
            */
            int ok=1;
            jl_methlist_t *curr = mt->defs;
            while (curr != NULL) {
                jl_value_t *slottype = nth_slot_type(curr->sig, i);
                if (slottype &&
                    !very_general_type(slottype) &&
                    jl_type_intersection(slottype, (jl_value_t*)jl_type_type) !=
                    (jl_value_t*)jl_bottom_type) {
                    ok=0;
                    break;
                }
                curr = curr->next;
            }
            if (ok) {
                jl_tupleset(type, i, (jl_value_t*)jl_typetype_type);
            }
        }
    }

    // for varargs methods, only specialize up to max_args.
    // in general, here we want to find the biggest type that's not a
    // supertype of any other method signatures. so far we are conservative
    // and the types we find should be bigger.
    if (type->length > jl_unbox_long(mt->max_args) &&
        jl_is_seq_type(jl_tupleref(decl,decl->length-1))) {
        size_t nspec = jl_unbox_long(mt->max_args)+2;
        jl_tuple_t *limited = jl_alloc_tuple(nspec);
        for(i=0; i < nspec-1; i++) {
            jl_tupleset(limited, i, jl_tupleref(type, i));
        }
        jl_value_t *lasttype = jl_tupleref(type,i-1);
        // if all subsequent arguments are subtypes of lasttype, specialize
        // on that instead of decl. for example, if decl is
        // (Any...)
        // and type is
        // (Symbol, Symbol, Symbol)
        // then specialize as (Symbol...), but if type is
        // (Symbol, Int32, Expr)
        // then specialize as (Any...)
        size_t j = i;
        int all_are_subtypes=1;
        for(; j < type->length; j++) {
            if (!jl_subtype(jl_tupleref(type,j), lasttype, 0)) {
                all_are_subtypes = 0;
                break;
            }
        }
        type = limited;
        if (all_are_subtypes) {
            // avoid Type{Type{...}...}...
            if (jl_is_type_type(lasttype))
                lasttype = (jl_value_t*)jl_type_type;
            temp = (jl_value_t*)jl_tuple1(lasttype);
            jl_tupleset(type, i, jl_apply_type((jl_value_t*)jl_seq_type,
                                               (jl_tuple_t*)temp));
        }
        else {
            jl_value_t *lastdeclt = jl_tupleref(decl,decl->length-1);
            if (sparams->length > 0) {
                // substitute matched static parameters into the declared
                // vararg type (sparams is stored as name/value pairs)
                lastdeclt = (jl_value_t*)
                    jl_instantiate_type_with((jl_type_t*)lastdeclt,
                                             sparams->data,
                                             sparams->length/2);
            }
            jl_tupleset(type, i, lastdeclt);
        }
        // now there is a problem: the computed signature is more
        // general than just the given arguments, so it might conflict
        // with another definition that doesn't have cache instances yet.
        // to fix this, we insert dummy cache entries for all intersections
        // of this signature and definitions. those dummy entries will
        // supersede this one in conflicted cases, alerting us that there
        // should actually be a cache miss.
        need_dummy_entries = 1;
    }

    if (need_dummy_entries) {
        // NULL-valued cache entries mark intersections with other methods,
        // forcing a cache miss when those cases are hit
        temp = ml_matches(mt->defs, (jl_value_t*)type, lambda_sym, -1);
        for(i=0; i < jl_array_len(temp); i++) {
            jl_value_t *m = jl_cellref(temp, i);
            if (jl_tupleref(m,2) != (jl_value_t*)method->linfo) {
                jl_method_cache_insert(mt, (jl_tuple_t*)jl_tupleref(m, 0), NULL);
            }
        }
    }

    // here we infer types and specialize the method
    /*
    if (sparams==jl_null)
        newmeth = method;
    else
    */
    jl_array_t *lilist=NULL;
    jl_lambda_info_t *li=NULL;
    if (method->linfo && method->linfo->specializations!=NULL) {
        // reuse code already generated for this combination of lambda and
        // arguments types. this happens for inner generic functions where
        // a new closure is generated on each call to the enclosing function.
        lilist = method->linfo->specializations;
        int k;
        for(k=0; k < lilist->length; k++) {
            li = (jl_lambda_info_t*)jl_cellref(lilist, k);
            if (jl_types_equal(li->specTypes, (jl_value_t*)type))
                break;
        }
        if (k == lilist->length) lilist=NULL;   // no existing specialization found
    }
    if (lilist != NULL && !li->inInference) {
        assert(li);
        newmeth = jl_reinstantiate_method(method, li);
        (void)jl_method_cache_insert(mt, type, newmeth);
        JL_GC_POP();
        return newmeth;
    }
    else {
        newmeth = jl_instantiate_method(method, sparams);
    }
    /*
      if "method" itself can ever be compiled, for example for use as
      an unspecialized method (see below), then newmeth->fptr might point
      to some slow compiled code instead of jl_trampoline, meaning our
      type-inferred code would never get compiled. this can be fixed with
      the commented-out snippet below.
    */
    assert(!(newmeth->linfo && newmeth->linfo->ast) ||
           newmeth->fptr == &jl_trampoline);
    /*
    if (newmeth->linfo&&newmeth->linfo->ast&&newmeth->fptr!=&jl_trampoline) {
        newmeth->fptr = &jl_trampoline;
    }
    */
    (void)jl_method_cache_insert(mt, type, newmeth);

    if (newmeth->linfo != NULL && newmeth->linfo->sparams == jl_null) {
        // when there are no static parameters, one unspecialized version
        // of a function can be shared among all cached specializations.
        if (method->linfo->unspecialized == NULL) {
            method->linfo->unspecialized =
                jl_instantiate_method(method, jl_null);
        }
        newmeth->linfo->unspecialized = method->linfo->unspecialized;
    }

    if (newmeth->linfo != NULL && newmeth->linfo->ast != NULL) {
        // record the specialization on the method and kick off type inference
        newmeth->linfo->specTypes = (jl_value_t*)type;
        jl_array_t *spe = method->linfo->specializations;
        if (spe == NULL) {
            spe = jl_alloc_cell_1d(1);
            jl_cellset(spe, 0, newmeth->linfo);
        }
        else {
            jl_cell_1d_push(spe, (jl_value_t*)newmeth->linfo);
        }
        method->linfo->specializations = spe;
        jl_type_infer(newmeth->linfo, type, method->linfo);
    }
    JL_GC_POP();
    return newmeth;
}