Example #1
File: typemap.c  Project: ararslan/julia
static int sig_match_by_type_leaf(jl_value_t **types, jl_tupletype_t *sig, size_t n)
{
    size_t i;
    for(i=0; i < n; i++) {
        jl_value_t *decl = jl_field_type(sig, i);
        jl_value_t *a = types[i];
        if (jl_is_type_type(a)) // decl is not Type, because it wouldn't be leafsig
            a = jl_typeof(jl_tparam0(a));
        if (!jl_types_equal(a, decl))
            return 0;
    }
    return 1;
}
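For orientation, a minimal hypothetical caller (the entry fields isleafsig, sig and next are taken from the insert examples further down this page): the leaf fast path is only valid for entries whose signature was marked isleafsig, so a scan of the linear entry list would gate on that flag. This is a sketch, not code from the repository.

static jl_typemap_entry_t *lookup_leaf_sketch(jl_typemap_entry_t *ml, jl_value_t **types, size_t n)
{
    // Sketch only: walk the linear entry list and use the leaf fast path
    // for entries whose signature consists entirely of leaf types.
    while (ml != (void*)jl_nothing) {
        if (ml->isleafsig && jl_datatype_nfields(ml->sig) == n &&
            sig_match_by_type_leaf(types, ml->sig, n))
            return ml;
        ml = ml->next;
    }
    return NULL;
}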
Example #2
// this is the general entry point for looking up a type in the cache
// (as a subtype, or with typeseq)
jl_typemap_entry_t *jl_typemap_assoc_by_type(union jl_typemap_t ml_or_cache, jl_tupletype_t *types, jl_svec_t **penv,
        int8_t subtype_inexact__sigseq_useenv, int8_t subtype, int8_t offs)
{
    if (jl_typeof(ml_or_cache.unknown) == (jl_value_t*)jl_typemap_level_type) {
        jl_typemap_level_t *cache = ml_or_cache.node;
        // called object is the primary key for constructors, otherwise first argument
        jl_value_t *ty = NULL;
        if (jl_datatype_nfields(types) > offs) {
            ty = jl_tparam(types, offs);
            if (cache->targ != (void*)jl_nothing && jl_is_type_type(ty)) {
                jl_value_t *a0 = jl_tparam0(ty);
                if (jl_is_datatype(a0)) {
                    union jl_typemap_t ml = mtcache_hash_lookup(cache->targ, a0, 1, offs);
                    if (ml.unknown != jl_nothing) {
                        jl_typemap_entry_t *li = jl_typemap_assoc_by_type(ml, types, penv,
                                subtype_inexact__sigseq_useenv, subtype, offs+1);
                        if (li) return li;
                    }
                }
            }
            if (cache->arg1 != (void*)jl_nothing && jl_is_datatype(ty)) {
                union jl_typemap_t ml = mtcache_hash_lookup(cache->arg1, ty, 0, offs);
                if (ml.unknown != jl_nothing) {
                    jl_typemap_entry_t *li = jl_typemap_assoc_by_type(ml, types, penv,
                            subtype_inexact__sigseq_useenv, subtype, offs+1);
                    if (li) return li;
                }
            }
        }
        if (subtype) {
            jl_typemap_entry_t *li = jl_typemap_assoc_by_type_(cache->linear, types, subtype_inexact__sigseq_useenv, penv);
            if (li) return li;
            return jl_typemap_assoc_by_type(cache->any, types, penv, subtype_inexact__sigseq_useenv, subtype, offs+1);
        }
        else {
            if (ty && jl_is_any(ty))
                return jl_typemap_assoc_by_type(cache->any, types, penv, subtype_inexact__sigseq_useenv, subtype, offs+1);
            else
                return jl_typemap_lookup_by_type_(cache->linear, types, subtype_inexact__sigseq_useenv);
        }
    }
    else {
        return subtype ?
            jl_typemap_assoc_by_type_(ml_or_cache.leaf, types, subtype_inexact__sigseq_useenv, penv) :
            jl_typemap_lookup_by_type_(ml_or_cache.leaf, types, subtype_inexact__sigseq_useenv);
    }
}
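A hypothetical call of this entry point (every argument value below is illustrative; mt stands for a jl_methtable_t* whose cache field is assumed here to be the union jl_typemap_t being searched): look up a tuple type in subtype mode, starting at offset 0, collecting the typevar environment.

// Sketch of a call site; `types` is the query tuple type.
jl_svec_t *env = jl_emptysvec;
jl_typemap_entry_t *entry =
    jl_typemap_assoc_by_type(mt->cache, types, &env,
                             /*subtype_inexact__sigseq_useenv*/ 0, // packed flag; meaning depends on `subtype`, see typemap.c
                             /*subtype*/ 1, /*offs*/ 0);
if (entry != NULL) {
    // entry->func.value holds the cached value; env holds the matched typevar bindings.
}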
Example #3
static inline int sig_match_simple(jl_value_t **args, size_t n, jl_value_t **sig,
                                   int va, size_t lensig)
{
    // NOTE: This function is a performance hot spot!!
    for(size_t i=0; i < n; i++) {
        jl_value_t *decl = sig[i];
        if (i == lensig-1) {
            if (va) {
                jl_value_t *t = jl_tparam0(decl);
                for(; i < n; i++) {
                    if (!jl_subtype(args[i], t, 1))
                        return 0;
                }
                return 1;
            }
        }
        jl_value_t *a = args[i];
        if (decl == (jl_value_t*)jl_any_type) {
        }
        else if ((jl_value_t*)jl_typeof(a) == decl) {
            /*
              we are only matching concrete types here, and those types are
              hash-consed, so pointer comparison should work.
            */
        }
        else if (jl_is_type_type(decl) && jl_is_type(a)) {
            jl_value_t *tp0 = jl_tparam0(decl);
            if (tp0 == (jl_value_t*)jl_typetype_tvar) {
                // in the case of Type{T}, the types don't have
                // to match exactly either. this is cached as Type{T}.
                // analogous to the situation with tuples.
            }
            else if (jl_is_typevar(tp0)) {
                if (!jl_subtype(a, ((jl_tvar_t*)tp0)->ub, 0))
                    return 0;
            }
            else {
                if (a!=tp0 && !jl_types_equal(a,tp0))
                    return 0;
            }
        }
        else {
            return 0;
        }
    }
    return 1;
}
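The Vararg branch above absorbs every remaining argument with a single check against the trailing slot. Stripped of the Julia type machinery, the tail-absorbing loop looks like the following standalone sketch (plain ints instead of jl_value_t*, made-up function name):

// Standalone illustration: once the last declared slot is reached, every
// remaining argument is tested against that one trailing declaration.
static int match_trailing_sketch(const int *args, size_t n, const int *sig, size_t lensig)
{
    for (size_t i = 0; i < n; i++) {
        if (i == lensig - 1) {           // trailing (Vararg-like) slot
            for (; i < n; i++)
                if (args[i] != sig[lensig - 1])
                    return 0;
            return 1;
        }
        if (args[i] != sig[i])           // ordinary positional slot
            return 0;
    }
    return 1;
}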
Example #4
File: typemap.c  Project: DrGar/julia
static void jl_typemap_level_insert_(jl_typemap_level_t *cache, jl_typemap_entry_t *newrec, int8_t offs,
        const struct jl_typemap_info *tparams)
{
    if (jl_datatype_nfields(newrec->sig) > offs) {
        jl_value_t *t1 = jl_tparam(newrec->sig, offs);
        // if t1 != jl_typetype_type and the argument is Type{...}, this
        // method has specializations for singleton kinds and we use
        // the table indexed for that purpose.
        if (t1 != (jl_value_t*)jl_typetype_type && jl_is_type_type(t1)) {
            jl_value_t *a0 = jl_tparam0(t1);
            if (jl_typemap_array_insert_(&cache->targ, a0, newrec, (jl_value_t*)cache, 1, offs, tparams))
                return;
        }
        if (jl_typemap_array_insert_(&cache->arg1, t1, newrec, (jl_value_t*)cache, 0, offs, tparams))
            return;
    }
    jl_typemap_list_insert_(&cache->linear, (jl_value_t*)cache, newrec, tparams);
}
Example #5
File: gf.c  Project: cshen/julia
static
jl_function_t *jl_method_cache_insert(jl_methtable_t *mt, jl_tuple_t *type,
                                      jl_function_t *method)
{
    jl_methlist_t **pml = &mt->cache;
    if (type->length > 0) {
        jl_value_t *t0 = jl_t0(type);
        uptrint_t uid=0;
        // if t0 != jl_typetype_type and the argument is Type{...}, this
        // method has specializations for singleton kinds and we use
        // the table indexed for that purpose.
        if (t0 != (jl_value_t*)jl_typetype_type && jl_is_type_type(t0)) {
            jl_value_t *a0 = jl_tparam0(t0);
            if (jl_is_struct_type(a0))
                uid = ((jl_struct_type_t*)a0)->uid;
            else if (jl_is_bits_type(a0))
                uid = ((jl_bits_type_t*)a0)->uid;
            if (uid > 0) {
                if (mt->cache_targ == NULL)
                    mt->cache_targ = jl_alloc_cell_1d(0);
                if (uid >= jl_array_len(mt->cache_targ)) {
                    jl_array_grow_end(mt->cache_targ, uid+4-jl_array_len(mt->cache_targ));
                }
                pml = (jl_methlist_t**)&jl_cellref(mt->cache_targ, uid);
                goto ml_do_insert;
            }
        }
        if (jl_is_struct_type(t0))
            uid = ((jl_struct_type_t*)t0)->uid;
        else if (jl_is_bits_type(t0))
            uid = ((jl_bits_type_t*)t0)->uid;
        if (uid > 0) {
            if (mt->cache_arg1 == NULL)
                mt->cache_arg1 = jl_alloc_cell_1d(0);
            if (uid >= jl_array_len(mt->cache_arg1)) {
                jl_array_grow_end(mt->cache_arg1, uid+4-jl_array_len(mt->cache_arg1));
            }
            pml = (jl_methlist_t**)&jl_cellref(mt->cache_arg1, uid);
        }
    }
 ml_do_insert:
    return jl_method_list_insert(pml, type, method, jl_null, 0)->func;
}
Example #6
File: typemap.c  Project: ararslan/julia
static void jl_typemap_level_insert_(jl_typemap_level_t *cache, jl_typemap_entry_t *newrec, int8_t offs,
                                     const struct jl_typemap_info *tparams)
{
    jl_value_t *ttypes = jl_unwrap_unionall((jl_value_t*)newrec->sig);
    size_t l = jl_field_count(ttypes);
    // compute the type at offset `offs` into `sig`, which may be a Vararg
    jl_value_t *t1 = NULL;
    int isva = 0;
    if (l <= offs + 1) {
        t1 = jl_tparam(ttypes, l - 1);
        if (jl_is_vararg_type(t1)) {
            isva = 1;
            t1 = jl_unwrap_vararg(t1);
        }
        else if (l <= offs) {
            t1 = NULL;
        }
    }
    else if (l > offs) {
        t1 = jl_tparam(ttypes, offs);
    }
    // If the type at `offs` is Any, put it in the Any list
    if (t1 && jl_is_any(t1)) {
        jl_typemap_insert_generic(&cache->any, (jl_value_t*)cache, newrec, (jl_value_t*)jl_any_type, offs+1, tparams);
        return;
    }
    // Don't put Varargs in the optimized caches (too hard to handle in lookup and bp)
    if (t1 && !isva) {
        // if t1 != jl_typetype_type and the argument is Type{...}, this
        // method has specializations for singleton kinds and we use
        // the table indexed for that purpose.
        if (t1 != (jl_value_t*)jl_typetype_type && jl_is_type_type(t1)) {
            jl_value_t *a0 = jl_tparam0(t1);
            if (jl_typemap_array_insert_(&cache->targ, a0, newrec, (jl_value_t*)cache, 1, offs, tparams))
                return;
        }
        if (jl_typemap_array_insert_(&cache->arg1, t1, newrec, (jl_value_t*)cache, 0, offs, tparams))
            return;
    }
    jl_typemap_list_insert_(&cache->linear, (jl_value_t*)cache, newrec, tparams);
}
Example #7
File: gf.c  Project: cshen/julia
/*
  Method caches are divided into three parts: one for signatures where
  the first argument is a singleton kind (Type{Foo}), one indexed by the
  UID of the first argument's type in normal cases, and a fallback
  table of everything else.
*/
static jl_function_t *jl_method_table_assoc_exact_by_type(jl_methtable_t *mt,
                                                          jl_tuple_t *types)
{
    jl_methlist_t *ml = NULL;
    if (types->length > 0) {
        jl_value_t *ty = jl_t0(types);
        uptrint_t uid;
        if (jl_is_type_type(ty)) {
            jl_value_t *a0 = jl_tparam0(ty);
            jl_value_t *tty = (jl_value_t*)jl_typeof(a0);
            if ((tty == (jl_value_t*)jl_struct_kind && (uid = ((jl_struct_type_t*)a0)->uid)) ||
                (tty == (jl_value_t*)jl_bits_kind   && (uid = ((jl_bits_type_t*)a0)->uid))) {
                if (mt->cache_targ &&
                    uid < jl_array_len(mt->cache_targ)) {
                    ml = (jl_methlist_t*)jl_cellref(mt->cache_targ, uid);
                    if (ml)
                        goto mt_assoc_bt_lkup;
                }
            }
        }
        if ((jl_is_struct_type(ty) && (uid = ((jl_struct_type_t*)ty)->uid)) ||
            (jl_is_bits_type(ty)   && (uid = ((jl_bits_type_t*)ty)->uid))) {
            if (mt->cache_arg1 && uid < jl_array_len(mt->cache_arg1)) {
                ml = (jl_methlist_t*)jl_cellref(mt->cache_arg1, uid);
            }
        }
    }
    if (ml == NULL)
        ml = mt->cache;
 mt_assoc_bt_lkup:
    while (ml != NULL) {
        if (cache_match_by_type(&jl_tupleref(types,0), types->length,
                                (jl_tuple_t*)ml->sig, ml->va==jl_true)) {
            return ml->func;
        }
        ml = ml->next;
    }
    return NULL;
}
Example #8
// this is the general entry point for looking up a type in the cache
// (as a subtype, or with typeseq)
jl_typemap_entry_t *jl_typemap_assoc_by_type(union jl_typemap_t ml_or_cache, jl_tupletype_t *types, jl_svec_t **penv,
        int8_t subtype_inexact__sigseq_useenv, int8_t subtype, int8_t offs)
{
    if (jl_typeof(ml_or_cache.unknown) == (jl_value_t*)jl_typemap_level_type) {
        jl_typemap_level_t *cache = ml_or_cache.node;
        // called object is the primary key for constructors, otherwise first argument
        jl_value_t *ty = NULL;
        size_t l = jl_field_count(types);
        int isva = 0;
        // compute the type at offset `offs` into `types`, which may be a Vararg
        if (l <= offs + 1) {
            ty = jl_tparam(types, l - 1);
            if (jl_is_vararg_type(ty)) {
                ty = jl_tparam0(ty);
                isva = 1;
            }
            else if (l <= offs) {
                ty = NULL;
            }
        }
        else if (l > offs) {
            ty = jl_tparam(types, offs);
        }
        // If there is a type at offs, look in the optimized caches
        if (!subtype) {
            if (ty && jl_is_any(ty))
                return jl_typemap_assoc_by_type(cache->any, types, penv, subtype_inexact__sigseq_useenv, subtype, offs+1);
            if (isva) // in lookup mode, want to match Vararg exactly, not as a subtype
                ty = NULL;
        }
        if (ty) {
            if (jl_is_type_type(ty)) {
                jl_value_t *a0 = jl_tparam0(ty);
                if (cache->targ.values != (void*)jl_nothing && jl_is_datatype(a0)) {
                    union jl_typemap_t ml = mtcache_hash_lookup(&cache->targ, a0, 1, offs);
                    if (ml.unknown != jl_nothing) {
                        jl_typemap_entry_t *li = jl_typemap_assoc_by_type(ml, types, penv,
                                subtype_inexact__sigseq_useenv, subtype, offs+1);
                        if (li) return li;
                    }
                }
                if (!subtype && is_cache_leaf(a0)) return NULL;
            }
            if (cache->arg1.values != (void*)jl_nothing && jl_is_datatype(ty)) {
                union jl_typemap_t ml = mtcache_hash_lookup(&cache->arg1, ty, 0, offs);
                if (ml.unknown != jl_nothing) {
                    jl_typemap_entry_t *li = jl_typemap_assoc_by_type(ml, types, penv,
                            subtype_inexact__sigseq_useenv, subtype, offs+1);
                    if (li) return li;
                }
            }
            if (!subtype && is_cache_leaf(ty)) return NULL;
        }
        // Always check the list (since offs doesn't always start at 0)
        if (subtype) {
            jl_typemap_entry_t *li = jl_typemap_assoc_by_type_(cache->linear, types, subtype_inexact__sigseq_useenv, penv);
            if (li) return li;
            return jl_typemap_assoc_by_type(cache->any, types, penv, subtype_inexact__sigseq_useenv, subtype, offs+1);
        }
        else {
            return jl_typemap_lookup_by_type_(cache->linear, types, subtype_inexact__sigseq_useenv);
        }
    }
    else {
        return subtype ?
            jl_typemap_assoc_by_type_(ml_or_cache.leaf, types, subtype_inexact__sigseq_useenv, penv) :
            jl_typemap_lookup_by_type_(ml_or_cache.leaf, types, subtype_inexact__sigseq_useenv);
    }
}
Example #9
jl_typemap_entry_t *jl_typemap_insert(union jl_typemap_t *cache, jl_value_t *parent,
                                      jl_tupletype_t *type, jl_svec_t *tvars,
                                      jl_tupletype_t *simpletype, jl_svec_t *guardsigs,
                                      jl_value_t *newvalue, int8_t offs,
                                      const struct jl_typemap_info *tparams,
                                      jl_value_t **overwritten)
{
    jl_ptls_t ptls = jl_get_ptls_states();
    assert(jl_is_tuple_type(type));
    if (!simpletype) {
        simpletype = (jl_tupletype_t*)jl_nothing;
    }

    if ((jl_value_t*)simpletype == jl_nothing) {
        jl_typemap_entry_t *ml = jl_typemap_assoc_by_type(*cache, type, NULL, 1, 0, offs);
        if (ml && ml->simplesig == (void*)jl_nothing) {
            if (overwritten != NULL)
                *overwritten = ml->func.value;
            if (newvalue == NULL)  // don't overwrite with guard entries
                return ml;
            // sigatomic begin
            ml->sig = type;
            jl_gc_wb(ml, ml->sig);
            ml->simplesig = simpletype;
            jl_gc_wb(ml, ml->simplesig);
            ml->tvars = tvars;
            jl_gc_wb(ml, ml->tvars);
            ml->va = jl_is_va_tuple(type);
            // TODO: `l->func` or `l->func->roots` might need to be rooted
            ml->func.value = newvalue;
            if (newvalue)
                jl_gc_wb(ml, newvalue);
            // sigatomic end
            return ml;
        }
    }
    if (overwritten != NULL)
        *overwritten = NULL;

    jl_typemap_entry_t *newrec =
        (jl_typemap_entry_t*)jl_gc_alloc(ptls, sizeof(jl_typemap_entry_t),
                                         jl_typemap_entry_type);
    newrec->sig = type;
    newrec->simplesig = simpletype;
    newrec->tvars = tvars;
    newrec->func.value = newvalue;
    newrec->guardsigs = guardsigs;
    newrec->next = (jl_typemap_entry_t*)jl_nothing;
    // compute the complexity of this type signature
    newrec->va = jl_is_va_tuple(type);
    newrec->issimplesig = (tvars == jl_emptysvec); // a TypeVar environment needs a complex matching test
    newrec->isleafsig = newrec->issimplesig && !newrec->va; // entirely leaf types don't need to be sorted
    JL_GC_PUSH1(&newrec);
    size_t i, l;
    for (i = 0, l = jl_field_count(type); i < l && newrec->issimplesig; i++) {
        jl_value_t *decl = jl_field_type(type, i);
        if (decl == (jl_value_t*)jl_datatype_type)
            newrec->isleafsig = 0; // Type{} may have a higher priority than DataType
        else if (decl == (jl_value_t*)jl_typector_type)
            newrec->isleafsig = 0; // Type{} may have a higher priority than TypeConstructor
        else if (jl_is_type_type(decl))
            newrec->isleafsig = 0; // Type{} may need special processing to compute the match
        else if (jl_is_vararg_type(decl))
            newrec->isleafsig = 0; // makes iteration easier when the endpoints are the same
        else if (decl == (jl_value_t*)jl_any_type)
            newrec->isleafsig = 0; // Any needs to go in the general cache
        else if (!jl_is_leaf_type(decl)) // anything else can go through the general subtyping test
            newrec->isleafsig = newrec->issimplesig = 0;
    }
    // TODO: assert that guardsigs == jl_emptysvec && simplesig == jl_nothing if isleafsig and optimize with that knowledge?
    jl_typemap_insert_generic(cache, parent, newrec, NULL, offs, tparams);
    JL_GC_POP();
    return newrec;
}
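A hypothetical call of this insert routine (every argument value here is illustrative): insert newvalue into a typemap keyed on the tuple type sigtype, with no typevars, no simplified signature, no guard signatures, and no interest in what was overwritten. `kind` stands in for one of the statically defined jl_typemap_info tables, and `mt` for a jl_methtable_t* whose cache field is assumed to be the target typemap.

jl_typemap_entry_t *entry =
    jl_typemap_insert(&mt->cache, (jl_value_t*)mt, sigtype, jl_emptysvec,
                      /*simpletype*/ NULL, /*guardsigs*/ jl_emptysvec,
                      newvalue, /*offs*/ 0, kind, /*overwritten*/ NULL);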
Example #10
File: typemap.c  Project: ararslan/julia
jl_typemap_entry_t *jl_typemap_insert(union jl_typemap_t *cache, jl_value_t *parent,
                                      jl_tupletype_t *type,
                                      jl_tupletype_t *simpletype, jl_svec_t *guardsigs,
                                      jl_value_t *newvalue, int8_t offs,
                                      const struct jl_typemap_info *tparams,
                                      size_t min_world, size_t max_world,
                                      jl_value_t **overwritten)
{
    jl_ptls_t ptls = jl_get_ptls_states();
    assert(min_world > 0 && max_world > 0);
    if (!simpletype)
        simpletype = (jl_tupletype_t*)jl_nothing;
    jl_value_t *ttype = jl_unwrap_unionall((jl_value_t*)type);

    if ((jl_value_t*)simpletype == jl_nothing) {
        jl_typemap_entry_t *ml = jl_typemap_assoc_by_type(*cache, (jl_value_t*)type, NULL, 0, offs, min_world, 0);
        if (ml && ml->simplesig == (void*)jl_nothing) {
            if (overwritten != NULL)
                *overwritten = ml->func.value;
            if (newvalue == ml->func.value) // no change. TODO: involve world in computation!
                return ml;
            if (newvalue == NULL)  // don't overwrite with guard entries
                return ml;
            ml->max_world = min_world - 1;
        }
    }

    jl_typemap_entry_t *newrec =
        (jl_typemap_entry_t*)jl_gc_alloc(ptls, sizeof(jl_typemap_entry_t),
                                         jl_typemap_entry_type);
    newrec->sig = type;
    newrec->simplesig = simpletype;
    newrec->func.value = newvalue;
    newrec->guardsigs = guardsigs;
    newrec->next = (jl_typemap_entry_t*)jl_nothing;
    newrec->min_world = min_world;
    newrec->max_world = max_world;
    // compute the complexity of this type signature
    newrec->va = jl_is_va_tuple((jl_datatype_t*)ttype);
    newrec->issimplesig = !jl_is_unionall(type); // a TypeVar environment needs a complex matching test
    newrec->isleafsig = newrec->issimplesig && !newrec->va; // entirely leaf types don't need to be sorted
    JL_GC_PUSH1(&newrec);
    assert(jl_is_tuple_type(ttype));
    size_t i, l;
    for (i = 0, l = jl_field_count(ttype); i < l && newrec->issimplesig; i++) {
        jl_value_t *decl = jl_field_type(ttype, i);
        if (jl_is_kind(decl))
            newrec->isleafsig = 0; // Type{} may have a higher priority than a kind
        else if (jl_is_type_type(decl))
            newrec->isleafsig = 0; // Type{} may need special processing to compute the match
        else if (jl_is_vararg_type(decl))
            newrec->isleafsig = 0; // makes iteration easier when the endpoints are the same
        else if (decl == (jl_value_t*)jl_any_type)
            newrec->isleafsig = 0; // Any needs to go in the general cache
        else if (!jl_is_concrete_type(decl)) // anything else needs to go through the general subtyping test
            newrec->isleafsig = newrec->issimplesig = 0;
    }
    // TODO: assert that guardsigs == jl_emptysvec && simplesig == jl_nothing if isleafsig and optimize with that knowledge?
    jl_typemap_insert_generic(cache, parent, newrec, NULL, offs, tparams);
    JL_GC_POP();
    return newrec;
}
Example #11
File: gf.c  Project: cshen/julia
static jl_function_t *cache_method(jl_methtable_t *mt, jl_tuple_t *type,
                                   jl_function_t *method, jl_tuple_t *decl,
                                   jl_tuple_t *sparams)
{
    size_t i;
    int need_dummy_entries = 0;
    jl_value_t *temp=NULL;
    jl_function_t *newmeth=NULL;
    JL_GC_PUSH(&type, &temp, &newmeth);

    for (i=0; i < type->length; i++) {
        jl_value_t *elt = jl_tupleref(type,i);
        int set_to_any = 0;
        if (nth_slot_type(decl,i) == jl_ANY_flag) {
            // don't specialize on slots marked ANY
            temp = jl_tupleref(type, i);
            jl_tupleset(type, i, (jl_value_t*)jl_any_type);
            int nintr=0;
            jl_methlist_t *curr = mt->defs;
            // if this method is the only match even with the current slot
            // set to Any, then it is safe to cache it that way.
            while (curr != NULL && curr->func!=method) {
                if (jl_type_intersection((jl_value_t*)curr->sig,
                                         (jl_value_t*)type) !=
                    (jl_value_t*)jl_bottom_type) {
                    nintr++;
                    break;
                }
                curr = curr->next;
            }
            if (nintr) {
                // TODO: even if different specializations of this slot need
                // separate cache entries, have them share code.
                jl_tupleset(type, i, temp);
            }
            else {
                set_to_any = 1;
            }
        }
        if (set_to_any) {
        }
        else if (jl_is_tuple(elt)) {
            /*
              don't cache tuple type exactly; just remember that it was
              a tuple, unless the declaration asks for something more
              specific. determined with a type intersection.
            */
            int might_need_dummy=0;
            temp = jl_tupleref(type, i);
            if (i < decl->length) {
                jl_value_t *declt = jl_tupleref(decl,i);
                // for T..., intersect with T
                if (jl_is_seq_type(declt))
                    declt = jl_tparam0(declt);
                if (declt == (jl_value_t*)jl_tuple_type ||
                    jl_subtype((jl_value_t*)jl_tuple_type, declt, 0)) {
                    // don't specialize args that matched (Any...) or Any
                    jl_tupleset(type, i, (jl_value_t*)jl_tuple_type);
                    might_need_dummy = 1;
                }
                else {
                    declt = jl_type_intersection(declt,
                                                 (jl_value_t*)jl_tuple_type);
                    if (((jl_tuple_t*)elt)->length > 3 ||
                        tuple_all_Any((jl_tuple_t*)declt)) {
                        jl_tupleset(type, i, declt);
                        might_need_dummy = 1;
                    }
                }
            }
            else {
                jl_tupleset(type, i, (jl_value_t*)jl_tuple_type);
                might_need_dummy = 1;
            }
            assert(jl_tupleref(type,i) != (jl_value_t*)jl_bottom_type);
            if (might_need_dummy) {
                jl_methlist_t *curr = mt->defs;
                // can't generalize type if there's an overlapping definition
                // with typevars
                while (curr != NULL && curr->func!=method) {
                    if (curr->tvars!=jl_null &&
                        jl_type_intersection((jl_value_t*)curr->sig,
                                             (jl_value_t*)type) !=
                        (jl_value_t*)jl_bottom_type) {
                        jl_tupleset(type, i, temp);
                        might_need_dummy = 0;
                        break;
                    }
                    curr = curr->next;
                }
            }
            if (might_need_dummy) {
                jl_methlist_t *curr = mt->defs;
                while (curr != NULL && curr->func!=method) {
                    jl_tuple_t *sig = curr->sig;
                    if (sig->length > i &&
                        jl_is_tuple(jl_tupleref(sig,i))) {
                        need_dummy_entries = 1;
                        break;
                    }
                    curr = curr->next;
                }
            }
        }
        else if (jl_is_type_type(elt) && jl_is_type_type(jl_tparam0(elt))) {
            /*
              actual argument was Type{...}, we computed its type as
              Type{Type{...}}. we must avoid unbounded nesting here, so
              cache the signature as Type{T}, unless something more
              specific like Type{Type{Int32}} was actually declared.
              this can be determined using a type intersection.
            */
            if (i < decl->length) {
                jl_value_t *declt = jl_tupleref(decl,i);
                // for T..., intersect with T
                if (jl_is_seq_type(declt))
                    declt = jl_tparam0(declt);
                jl_tupleset(type, i,
                            jl_type_intersection(declt, (jl_value_t*)jl_typetype_type));
            }
            else {
                jl_tupleset(type, i, (jl_value_t*)jl_typetype_type);
            }
            assert(jl_tupleref(type,i) != (jl_value_t*)jl_bottom_type);
        }
        else if (jl_is_type_type(elt) &&
                 very_general_type(nth_slot_type(decl,i))) {
            /*
              here's a fairly complex heuristic: if this argument slot's
              declared type is Any, and no definition overlaps with Type
              for this slot, then don't specialize for every Type that
              might be passed.
              Since every type x has its own type Type{x}, this would be
              excessive specialization for an Any slot.
            */
            int ok=1;
            jl_methlist_t *curr = mt->defs;
            while (curr != NULL) {
                jl_value_t *slottype = nth_slot_type(curr->sig, i);
                if (slottype &&
                    !very_general_type(slottype) &&
                    jl_type_intersection(slottype,
                                         (jl_value_t*)jl_type_type) !=
                    (jl_value_t*)jl_bottom_type) {
                    ok=0;
                    break;
                }
                curr = curr->next;
            }
            if (ok) {
                jl_tupleset(type, i, (jl_value_t*)jl_typetype_type);
            }
        }
    }

    // for varargs methods, only specialize up to max_args.
    // in general, here we want to find the biggest type that's not a
    // supertype of any other method signatures. so far we are conservative
    // and the types we find should be bigger.
    if (type->length > jl_unbox_long(mt->max_args) &&
        jl_is_seq_type(jl_tupleref(decl,decl->length-1))) {
        size_t nspec = jl_unbox_long(mt->max_args)+2;
        jl_tuple_t *limited = jl_alloc_tuple(nspec);
        for(i=0; i < nspec-1; i++) {
            jl_tupleset(limited, i, jl_tupleref(type, i));
        }
        jl_value_t *lasttype = jl_tupleref(type,i-1);
        // if all subsequent arguments are subtypes of lasttype, specialize
        // on that instead of decl. for example, if decl is
        // (Any...)
        // and type is
        // (Symbol, Symbol, Symbol)
        // then specialize as (Symbol...), but if type is
        // (Symbol, Int32, Expr)
        // then specialize as (Any...)
        size_t j = i;
        int all_are_subtypes=1;
        for(; j < type->length; j++) {
            if (!jl_subtype(jl_tupleref(type,j), lasttype, 0)) {
                all_are_subtypes = 0;
                break;
            }
        }
        type = limited;
        if (all_are_subtypes) {
            // avoid Type{Type{...}...}...
            if (jl_is_type_type(lasttype))
                lasttype = (jl_value_t*)jl_type_type;
            temp = (jl_value_t*)jl_tuple1(lasttype);
            jl_tupleset(type, i, jl_apply_type((jl_value_t*)jl_seq_type,
                                               (jl_tuple_t*)temp));
        }
        else {
            jl_value_t *lastdeclt = jl_tupleref(decl,decl->length-1);
            if (sparams->length > 0) {
                lastdeclt = (jl_value_t*)
                    jl_instantiate_type_with((jl_type_t*)lastdeclt,
                                             sparams->data,
                                             sparams->length/2);
            }
            jl_tupleset(type, i, lastdeclt);
        }
        // now there is a problem: the computed signature is more
        // general than just the given arguments, so it might conflict
        // with another definition that doesn't have cache instances yet.
        // to fix this, we insert dummy cache entries for all intersections
        // of this signature and definitions. those dummy entries will
        // supersede this one in conflicted cases, alerting us that there
        // should actually be a cache miss.
        need_dummy_entries = 1;
    }

    if (need_dummy_entries) {
        temp = ml_matches(mt->defs, (jl_value_t*)type, lambda_sym, -1);
        for(i=0; i < jl_array_len(temp); i++) {
            jl_value_t *m = jl_cellref(temp, i);
            if (jl_tupleref(m,2) != (jl_value_t*)method->linfo) {
                jl_method_cache_insert(mt, (jl_tuple_t*)jl_tupleref(m, 0),
                                       NULL);
            }
        }
    }

    // here we infer types and specialize the method
    /*
    if (sparams==jl_null)
        newmeth = method;
    else
    */
    jl_array_t *lilist=NULL;
    jl_lambda_info_t *li=NULL;
    if (method->linfo && method->linfo->specializations!=NULL) {
        // reuse code already generated for this combination of lambda and
        // arguments types. this happens for inner generic functions where
        // a new closure is generated on each call to the enclosing function.
        lilist = method->linfo->specializations;
        int k;
        for(k=0; k < lilist->length; k++) {
            li = (jl_lambda_info_t*)jl_cellref(lilist, k);
            if (jl_types_equal(li->specTypes, (jl_value_t*)type))
                break;
        }
        if (k == lilist->length) lilist=NULL;
    }
    if (lilist != NULL && !li->inInference) {
        assert(li);
        newmeth = jl_reinstantiate_method(method, li);
        (void)jl_method_cache_insert(mt, type, newmeth);
        JL_GC_POP();
        return newmeth;
    }
    else {
        newmeth = jl_instantiate_method(method, sparams);
    }
    /*
      if "method" itself can ever be compiled, for example for use as
      an unspecialized method (see below), then newmeth->fptr might point
      to some slow compiled code instead of jl_trampoline, meaning our
      type-inferred code would never get compiled. this can be fixed with
      the commented-out snippet below.
    */
    assert(!(newmeth->linfo && newmeth->linfo->ast) ||
           newmeth->fptr == &jl_trampoline);
    /*
    if (newmeth->linfo&&newmeth->linfo->ast&&newmeth->fptr!=&jl_trampoline) {
        newmeth->fptr = &jl_trampoline;
    }
    */

    (void)jl_method_cache_insert(mt, type, newmeth);

    if (newmeth->linfo != NULL && newmeth->linfo->sparams == jl_null) {
        // when there are no static parameters, one unspecialized version
        // of a function can be shared among all cached specializations.
        if (method->linfo->unspecialized == NULL) {
            method->linfo->unspecialized =
                jl_instantiate_method(method, jl_null);
        }
        newmeth->linfo->unspecialized = method->linfo->unspecialized;
    }

    if (newmeth->linfo != NULL && newmeth->linfo->ast != NULL) {
        newmeth->linfo->specTypes = (jl_value_t*)type;
        jl_array_t *spe = method->linfo->specializations;
        if (spe == NULL) {
            spe = jl_alloc_cell_1d(1);
            jl_cellset(spe, 0, newmeth->linfo);
        }
        else {
            jl_cell_1d_push(spe, (jl_value_t*)newmeth->linfo);
        }
        method->linfo->specializations = spe;
        jl_type_infer(newmeth->linfo, type, method->linfo);
    }
    JL_GC_POP();
    return newmeth;
}
Example #12
File: typemap.c  Project: DrGar/julia
// calls fptr on each jl_typemap_entry_t in cache in sort order
// for which type ∩ ml->type != Union{}, until fptr returns false
int jl_typemap_intersection_visitor(union jl_typemap_t map, int offs,
        struct typemap_intersection_env *closure)
{
    jl_typemap_entry_t *ml;
    if (jl_typeof(map.unknown) == (jl_value_t*)jl_typemap_level_type) {
        jl_typemap_level_t *cache = map.node;
        jl_value_t *ty = NULL;
        size_t l = jl_datatype_nfields(closure->type);
        if (closure->va && l == offs - 1) {
            ty = closure->va;
        }
        else if (l > offs) {
            ty = jl_tparam(closure->type, offs);
        }
        if (ty) {
            if (cache->targ != (void*)jl_nothing) {
                if (jl_is_type_type(ty) && is_cache_leaf(jl_tparam0(ty))) {
                    // direct lookup of leaf types
                    union jl_typemap_t ml = mtcache_hash_lookup(cache->targ, jl_tparam0(ty), 1, offs);
                    if (ml.unknown != jl_nothing) {
                        if (!jl_typemap_intersection_visitor(ml, offs+1, closure)) return 0;
                    }
                }
                else {
                    // else an array scan is required to check subtypes
                    // TODO: fast-path: optimized pre-intersection test
                    if (!jl_typemap_intersection_array_visitor(cache->targ, ty, 1, offs, closure)) return 0;
                }
            }
            if (cache->arg1 != (void*)jl_nothing) {
                if (is_cache_leaf(ty)) {
                    // direct lookup of leaf types
                    union jl_typemap_t ml = mtcache_hash_lookup(cache->arg1, ty, 0, offs);
                    if (ml.unknown != jl_nothing) {
                        if (!jl_typemap_intersection_visitor(ml, offs+1, closure)) return 0;
                    }
                }
                else {
                    // else an array scan is required to check subtypes
                    if (!jl_typemap_intersection_array_visitor(cache->arg1, ty, 0, offs, closure)) return 0;
                }
            }
        }
        ml = map.node->linear;
    }
    else {
        ml = map.leaf;
    }
    // slow-path scan everything else
    // mark this `register` because (for branch prediction)
    // that can be absolutely critical for speed
    register jl_typemap_intersection_visitor_fptr fptr = closure->fptr;
    while (ml != (void*)jl_nothing) {
        // TODO: optimize intersection test
        if (closure->type == (jl_value_t*)ml->sig) {
            // fast-path for the intersection of a type with itself
            if (closure->env)
                closure->env = ml->tvars;
            closure->ti = closure->type;
            if (!fptr(ml, closure))
                return 0;
        }
        else {
            jl_value_t *ti;
            if (closure->env) {
                closure->env = jl_emptysvec;
                ti = jl_lookup_match(closure->type, (jl_value_t*)ml->sig, &closure->env, ml->tvars);
            }
            else {
                ti = jl_type_intersection(closure->type, (jl_value_t*)ml->sig);
            }
            if (ti != (jl_value_t*)jl_bottom_type) {
                closure->ti = ti;
                if (!fptr(ml, closure))
                    return 0;
            }
        }
        ml = ml->next;
    }
    return 1;
}
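Callers extend struct typemap_intersection_env with their own state and hand in a callback through fptr; the visitor fills in ti (and env, if requested) before each call. A minimal hypothetical consumer is sketched below (count_env and count_entry are made-up names; the embed-the-env-as-first-field pattern mirrors how the visitors in gf.c extend the closure):

// Hypothetical visitor that counts entries intersecting a query type.
struct count_env {
    struct typemap_intersection_env match; // must come first so the callback can cast back
    size_t count;
};

static int count_entry(jl_typemap_entry_t *ml, struct typemap_intersection_env *closure)
{
    struct count_env *env = (struct count_env*)closure;
    env->count++;        // closure->ti holds the intersection the visitor just computed
    return 1;            // nonzero: keep visiting
}

// Usage sketch: set env.match.fptr = count_entry, env.match.type = the query type,
// env.match.va = NULL (or the Vararg element type), env.match.env = jl_emptysvec if
// typevar bindings are wanted, then call
// jl_typemap_intersection_visitor(map, /*offs*/ 0, &env.match) and read env.count.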