static jl_function_t *jl_mt_assoc_by_type(jl_methtable_t *mt, jl_tuple_t *tt,
                                          int cache)
{
    jl_methlist_t *m = mt->defs;
    size_t nargs = tt->length;
    size_t i;
    jl_value_t *env = jl_false;

    while (m != NULL) {
        if (m->tvars != jl_null) {
            env = jl_type_match((jl_value_t*)tt, (jl_value_t*)m->sig);
            if (env != (jl_value_t*)jl_false) break;
        }
        else if (jl_tuple_subtype(&jl_tupleref(tt,0), nargs,
                                  &jl_tupleref(m->sig,0),
                                  ((jl_tuple_t*)m->sig)->length, 0, 0)) {
            break;
        }
        m = m->next;
    }

    if (env == (jl_value_t*)jl_false) {
        if (m != NULL) {
            if (!cache) {
                return m->func;
            }
            return cache_method(mt, tt, m->func, (jl_tuple_t*)m->sig, jl_null);
        }
        return NULL;
    }

    jl_tuple_t *newsig = NULL;
    JL_GC_PUSH(&env, &newsig);
    assert(jl_is_tuple(env));
    jl_tuple_t *tpenv = (jl_tuple_t*)env;
    // don't bother computing this if no arguments are tuples
    for (i = 0; i < tt->length; i++) {
        if (jl_is_tuple(jl_tupleref(tt,i)))
            break;
    }
    if (i < tt->length) {
        newsig = (jl_tuple_t*)jl_instantiate_type_with((jl_type_t*)m->sig,
                                                       &jl_tupleref(tpenv,0),
                                                       tpenv->length/2);
    }
    else {
        newsig = (jl_tuple_t*)m->sig;
    }
    assert(jl_is_tuple(newsig));
    jl_function_t *nf;
    if (!cache)
        nf = m->func;
    else
        nf = cache_method(mt, tt, m->func, newsig, tpenv);
    JL_GC_POP();
    return nf;
}
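/*
  Worked example (hedged sketch; the definition below is hypothetical,
  not taken from this file). For a definition with typevars such as

      f{T}(x::T, y::T)

  a call f(1, 2) gives tt = (Int64, Int64). jl_type_match returns the
  match environment as a flat tuple of alternating typevar/value pairs,
  e.g. (T, Int64), which is why tpenv->length/2 is the binding count
  passed to jl_instantiate_type_with when re-instantiating m->sig.
*/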
// f{<:Union{...}}(...) is a common pattern
// and expanding the Union may give a leaf function
static void _compile_all_tvar_union(jl_value_t *methsig)
{
    if (!jl_is_unionall(methsig) && jl_is_leaf_type(methsig)) {
        // usually can create a specialized version of the function,
        // if the signature is already a leaftype
        if (jl_compile_hint((jl_tupletype_t*)methsig))
            return;
    }

    int tvarslen = jl_subtype_env_size(methsig);
    jl_value_t *sigbody = methsig;
    jl_value_t **env;
    JL_GC_PUSHARGS(env, 2 * tvarslen);
    int *idx = (int*)alloca(sizeof(int) * tvarslen);
    int i;
    for (i = 0; i < tvarslen; i++) {
        assert(jl_is_unionall(sigbody));
        idx[i] = 0;
        env[2 * i] = (jl_value_t*)((jl_unionall_t*)sigbody)->var;
        // initialize the list with Union{}, since T<:Union{} is always a valid option
        env[2 * i + 1] = jl_bottom_type;
        sigbody = ((jl_unionall_t*)sigbody)->body;
    }

    for (i = 0; i < tvarslen; /* incremented by inner loop */) {
        jl_value_t *sig;
        JL_TRY {
            // TODO: wrap in UnionAll for each tvar in env[2*i + 1] ?
            // currently doesn't matter much, since jl_compile_hint doesn't work on abstract types
            sig = (jl_value_t*)jl_instantiate_type_with(sigbody, env, tvarslen);
        }
        JL_CATCH {
            goto getnext; // sigh, we found an invalid type signature. should we warn the user?
        }
        assert(jl_is_tuple_type(sig));
        if (sig == jl_bottom_type || tupletype_any_bottom(sig))
            goto getnext; // signature wouldn't be callable / is invalid -- skip it
        if (jl_is_leaf_type(sig)) {
            if (jl_compile_hint((jl_tupletype_t*)sig))
                goto getnext; // success
        }

    getnext:
        for (i = 0; i < tvarslen; i++) {
            jl_tvar_t *tv = (jl_tvar_t*)env[2 * i];
            if (jl_is_uniontype(tv->ub)) {
                size_t l = jl_count_union_components(tv->ub);
                size_t j = idx[i];
                if (j == l) {
                    env[2 * i + 1] = jl_bottom_type;
                    idx[i] = 0;
                }
                else {
                    jl_value_t *ty = jl_nth_union_component(tv->ub, j);
                    if (!jl_is_leaf_type(ty))
                        ty = (jl_value_t*)jl_new_typevar(tv->name, tv->lb, ty);
                    env[2 * i + 1] = ty;
                    idx[i] = j + 1;
                    break;
                }
            }
            else {
                env[2 * i + 1] = (jl_value_t*)tv;
            }
        }
    }
    JL_GC_POP();
}
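/*
  Hedged sketch of the enumeration above (hypothetical signature, not
  from this file). For

      f(x::T) where T <: Union{Int32, Int64}

  idx[] acts as an odometer over the components of each typevar's Union
  upper bound: env steps through T = Union{} (skipped, since the tuple
  type then contains bottom), then T = Int32, then T = Int64, and
  jl_compile_hint is attempted on each resulting leaf signature,
  (Int32,) and (Int64,).
*/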
static jl_function_t *cache_method(jl_methtable_t *mt, jl_tuple_t *type,
                                   jl_function_t *method, jl_tuple_t *decl,
                                   jl_tuple_t *sparams)
{
    size_t i;
    int need_dummy_entries = 0;
    jl_value_t *temp = NULL;
    jl_function_t *newmeth = NULL;
    JL_GC_PUSH(&type, &temp, &newmeth);

    for (i=0; i < type->length; i++) {
        jl_value_t *elt = jl_tupleref(type,i);
        int set_to_any = 0;
        if (nth_slot_type(decl,i) == jl_ANY_flag) {
            // don't specialize on slots marked ANY
            temp = jl_tupleref(type, i);
            jl_tupleset(type, i, (jl_value_t*)jl_any_type);
            int nintr = 0;
            jl_methlist_t *curr = mt->defs;
            // if this method is the only match even with the current slot
            // set to Any, then it is safe to cache it that way.
            while (curr != NULL && curr->func != method) {
                if (jl_type_intersection((jl_value_t*)curr->sig,
                                         (jl_value_t*)type) !=
                    (jl_value_t*)jl_bottom_type) {
                    nintr++;
                    break;
                }
                curr = curr->next;
            }
            if (nintr) {
                // TODO: even if different specializations of this slot need
                // separate cache entries, have them share code.
                jl_tupleset(type, i, temp);
            }
            else {
                set_to_any = 1;
            }
        }
        if (set_to_any) {
            // slot was widened to Any above; nothing more to do for it
        }
        else if (jl_is_tuple(elt)) {
            /*
              don't cache tuple type exactly; just remember that it was
              a tuple, unless the declaration asks for something more
              specific. determined with a type intersection.
            */
            int might_need_dummy = 0;
            temp = jl_tupleref(type, i);
            if (i < decl->length) {
                jl_value_t *declt = jl_tupleref(decl,i);
                // for T..., intersect with T
                if (jl_is_seq_type(declt))
                    declt = jl_tparam0(declt);
                if (declt == (jl_value_t*)jl_tuple_type ||
                    jl_subtype((jl_value_t*)jl_tuple_type, declt, 0)) {
                    // don't specialize args that matched (Any...) or Any
                    jl_tupleset(type, i, (jl_value_t*)jl_tuple_type);
                    might_need_dummy = 1;
                }
                else {
                    declt = jl_type_intersection(declt,
                                                 (jl_value_t*)jl_tuple_type);
                    if (((jl_tuple_t*)elt)->length > 3 ||
                        tuple_all_Any((jl_tuple_t*)declt)) {
                        jl_tupleset(type, i, declt);
                        might_need_dummy = 1;
                    }
                }
            }
            else {
                jl_tupleset(type, i, (jl_value_t*)jl_tuple_type);
                might_need_dummy = 1;
            }
            assert(jl_tupleref(type,i) != (jl_value_t*)jl_bottom_type);
            if (might_need_dummy) {
                jl_methlist_t *curr = mt->defs;
                // can't generalize type if there's an overlapping definition
                // with typevars
                while (curr != NULL && curr->func != method) {
                    if (curr->tvars != jl_null &&
                        jl_type_intersection((jl_value_t*)curr->sig,
                                             (jl_value_t*)type) !=
                        (jl_value_t*)jl_bottom_type) {
                        jl_tupleset(type, i, temp);
                        might_need_dummy = 0;
                        break;
                    }
                    curr = curr->next;
                }
            }
            if (might_need_dummy) {
                jl_methlist_t *curr = mt->defs;
                while (curr != NULL && curr->func != method) {
                    jl_tuple_t *sig = (jl_tuple_t*)curr->sig;
                    if (sig->length > i &&
                        jl_is_tuple(jl_tupleref(sig,i))) {
                        need_dummy_entries = 1;
                        break;
                    }
                    curr = curr->next;
                }
            }
        }
        else if (jl_is_type_type(elt) && jl_is_type_type(jl_tparam0(elt))) {
            /*
              actual argument was Type{...}, we computed its type as
              Type{Type{...}}. we must avoid unbounded nesting here, so
              cache the signature as Type{T}, unless something more
              specific like Type{Type{Int32}} was actually declared.
              this can be determined using a type intersection.
            */
            if (i < decl->length) {
                jl_value_t *declt = jl_tupleref(decl,i);
                // for T..., intersect with T
                if (jl_is_seq_type(declt))
                    declt = jl_tparam0(declt);
                jl_tupleset(type, i,
                            jl_type_intersection(declt,
                                                 (jl_value_t*)jl_typetype_type));
            }
            else {
                jl_tupleset(type, i, (jl_value_t*)jl_typetype_type);
            }
            assert(jl_tupleref(type,i) != (jl_value_t*)jl_bottom_type);
        }
        else if (jl_is_type_type(elt) &&
                 very_general_type(nth_slot_type(decl,i))) {
            /*
              here's a fairly complex heuristic: if this argument slot's
              declared type is Any, and no definition overlaps with Type
              for this slot, then don't specialize for every Type that
              might be passed. Since every type x has its own type
              Type{x}, this would be excessive specialization for an
              Any slot.
            */
            int ok = 1;
            jl_methlist_t *curr = mt->defs;
            while (curr != NULL) {
                jl_value_t *slottype = nth_slot_type(curr->sig, i);
                if (slottype && !very_general_type(slottype) &&
                    jl_type_intersection(slottype,
                                         (jl_value_t*)jl_type_type) !=
                    (jl_value_t*)jl_bottom_type) {
                    ok = 0;
                    break;
                }
                curr = curr->next;
            }
            if (ok) {
                jl_tupleset(type, i, (jl_value_t*)jl_typetype_type);
            }
        }
    }

    // for varargs methods, only specialize up to max_args.
    // in general, here we want to find the biggest type that's not a
    // supertype of any other method signatures. so far we are conservative
    // and the types we find should be bigger.
    if (type->length > jl_unbox_long(mt->max_args) &&
        jl_is_seq_type(jl_tupleref(decl,decl->length-1))) {
        size_t nspec = jl_unbox_long(mt->max_args)+2;
        jl_tuple_t *limited = jl_alloc_tuple(nspec);
        for(i=0; i < nspec-1; i++) {
            jl_tupleset(limited, i, jl_tupleref(type, i));
        }
        jl_value_t *lasttype = jl_tupleref(type,i-1);
        // if all subsequent arguments are subtypes of lasttype, specialize
        // on that instead of decl. for example, if decl is
        // (Any...)
        // and type is
        // (Symbol, Symbol, Symbol)
        // then specialize as (Symbol...), but if type is
        // (Symbol, Int32, Expr)
        // then specialize as (Any...)
        size_t j = i;
        int all_are_subtypes = 1;
        for(; j < type->length; j++) {
            if (!jl_subtype(jl_tupleref(type,j), lasttype, 0)) {
                all_are_subtypes = 0;
                break;
            }
        }
        type = limited;
        if (all_are_subtypes) {
            // avoid Type{Type{...}...}...
            if (jl_is_type_type(lasttype))
                lasttype = (jl_value_t*)jl_type_type;
            temp = (jl_value_t*)jl_tuple1(lasttype);
            jl_tupleset(type, i, jl_apply_type((jl_value_t*)jl_seq_type,
                                               (jl_tuple_t*)temp));
        }
        else {
            jl_value_t *lastdeclt = jl_tupleref(decl,decl->length-1);
            if (sparams->length > 0) {
                lastdeclt = (jl_value_t*)
                    jl_instantiate_type_with((jl_type_t*)lastdeclt,
                                             sparams->data,
                                             sparams->length/2);
            }
            jl_tupleset(type, i, lastdeclt);
        }
        // now there is a problem: the computed signature is more
        // general than just the given arguments, so it might conflict
        // with another definition that doesn't have cache instances yet.
        // to fix this, we insert dummy cache entries for all intersections
        // of this signature and definitions. those dummy entries will
        // supersede this one in conflicted cases, alerting us that there
        // should actually be a cache miss.
        need_dummy_entries = 1;
    }

    if (need_dummy_entries) {
        temp = ml_matches(mt->defs, (jl_value_t*)type, lambda_sym, -1);
        for(i=0; i < jl_array_len(temp); i++) {
            jl_value_t *m = jl_cellref(temp, i);
            if (jl_tupleref(m,2) != (jl_value_t*)method->linfo) {
                jl_method_cache_insert(mt, (jl_tuple_t*)jl_tupleref(m, 0),
                                       NULL);
            }
        }
    }

    // here we infer types and specialize the method
    /*
    if (sparams==jl_null)
        newmeth = method;
    else
    */
    jl_array_t *lilist = NULL;
    jl_lambda_info_t *li = NULL;
    if (method->linfo && method->linfo->specializations != NULL) {
        // reuse code already generated for this combination of lambda and
        // arguments types. this happens for inner generic functions where
        // a new closure is generated on each call to the enclosing function.
        lilist = method->linfo->specializations;
        int k;
        for(k=0; k < lilist->length; k++) {
            li = (jl_lambda_info_t*)jl_cellref(lilist, k);
            if (jl_types_equal(li->specTypes, (jl_value_t*)type))
                break;
        }
        if (k == lilist->length) lilist = NULL;
    }
    if (lilist != NULL && !li->inInference) {
        assert(li);
        newmeth = jl_reinstantiate_method(method, li);
        (void)jl_method_cache_insert(mt, type, newmeth);
        JL_GC_POP();
        return newmeth;
    }
    else {
        newmeth = jl_instantiate_method(method, sparams);
    }
    /*
      if "method" itself can ever be compiled, for example for use as
      an unspecialized method (see below), then newmeth->fptr might point
      to some slow compiled code instead of jl_trampoline, meaning our
      type-inferred code would never get compiled. this can be fixed
      with the commented-out snippet below.
    */
    assert(!(newmeth->linfo && newmeth->linfo->ast) ||
           newmeth->fptr == &jl_trampoline);
    /*
    if (newmeth->linfo&&newmeth->linfo->ast&&newmeth->fptr!=&jl_trampoline) {
        newmeth->fptr = &jl_trampoline;
    }
    */
    (void)jl_method_cache_insert(mt, type, newmeth);

    if (newmeth->linfo != NULL && newmeth->linfo->sparams == jl_null) {
        // when there are no static parameters, one unspecialized version
        // of a function can be shared among all cached specializations.
        if (method->linfo->unspecialized == NULL) {
            method->linfo->unspecialized =
                jl_instantiate_method(method, jl_null);
        }
        newmeth->linfo->unspecialized = method->linfo->unspecialized;
    }

    if (newmeth->linfo != NULL && newmeth->linfo->ast != NULL) {
        newmeth->linfo->specTypes = (jl_value_t*)type;
        jl_array_t *spe = method->linfo->specializations;
        if (spe == NULL) {
            spe = jl_alloc_cell_1d(1);
            jl_cellset(spe, 0, newmeth->linfo);
        }
        else {
            jl_cell_1d_push(spe, (jl_value_t*)newmeth->linfo);
        }
        method->linfo->specializations = spe;
        jl_type_infer(newmeth->linfo, type, method->linfo);
    }
    JL_GC_POP();
    return newmeth;
}
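/*
  Hedged sketch of the dummy-entry mechanism (hypothetical definitions,
  not from this file). Suppose the entry computed above was generalized
  to (Symbol...) for a varargs method f(x...), while a separate
  definition f(x::Symbol, y::Symbol) also exists. ml_matches then
  reports that definition's intersection with (Symbol...), namely
  (Symbol, Symbol), and a NULL entry is inserted for it. A later call
  f(:a, :b) hits the dummy entry, takes a cache miss, and re-dispatches
  correctly instead of reusing the (Symbol...) specialization.
*/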