Example #1
// get binding for assignment
jl_binding_t *jl_get_binding_wr(jl_module_t *m, jl_sym_t *var)
{
    jl_binding_t **bp = (jl_binding_t**)ptrhash_bp(&m->bindings, var);
    jl_binding_t *b;
    if (*bp == HT_NOTFOUND) {
        b = (jl_binding_t*)allocb(sizeof(jl_binding_t));
        b->name = var;
        b->value = NULL;
        b->type = (jl_type_t*)jl_any_type;
        b->constp = 0;
        b->exportp = 0;
        *bp = b;

        // keep track of all variables added after the VARIABLES array
        // is defined
        if (jl_system_module) {
            if (varlist_binding == NULL) {
                varlist_binding = jl_get_binding(jl_system_module, jl_symbol("VARIABLES"));
            }
            if (varlist_binding && varlist_binding->value != NULL &&
                jl_typeis(varlist_binding->value, jl_array_any_type)) {
                jl_array_t *a = (jl_array_t*)varlist_binding->value;
                jl_cell_1d_push(a, (jl_value_t*)var);
            }
        }
    }
    return *bp;
}
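Usage note (not part of the original listing): jl_get_binding_wr is the fetch-or-create path used when a global is about to be assigned. A minimal caller sketch, assuming the same internal headers as the snippet above; real assignment paths additionally honor b->constp and b->type:

// Hypothetical caller: fetch-or-create the binding for `x` in module m,
// then store a value into it.
jl_binding_t *b = jl_get_binding_wr(m, jl_symbol("x"));
b->value = val;  // sketch only; a checked assignment would also verify constp/type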
Example #2
static int literal_val_id(jl_value_t *v)
{
    for(int i=0; i < jl_array_len(tree_literal_values); i++) {
        if (jl_cellref(tree_literal_values,i) == v)
            return i;
    }
    jl_cell_1d_push(tree_literal_values, v);
    return jl_array_len(tree_literal_values)-1;
}
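This routine interns a literal by linear scan: if the value is already in tree_literal_values it returns the existing index, otherwise it appends the value and returns the new last index. A small sketch of the resulting idempotence, assuming tree_literal_values has been initialized as an Any array:

// Interning the same value twice yields the same id.
int id1 = literal_val_id(v);
int id2 = literal_val_id(v);  // found by the scan, not pushed again
assert(id1 == id2);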
Example #3
// load time init procedure: in build mode, only record order
void jl_module_load_time_initialize(jl_module_t *m)
{
    int build_mode = (jl_compileropts.build_path != NULL);
    if (build_mode) {
        if (jl_module_init_order == NULL)
            jl_module_init_order = jl_alloc_cell_1d(0);
        jl_cell_1d_push(jl_module_init_order, (jl_value_t*)m);
    }
    else {
        jl_module_run_initializer(m);
    }
}
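In build mode the initializer is not run; the module is only appended to jl_module_init_order so that initialization can be replayed later in load order. A hedged sketch of such a drain loop, using only the helpers visible in these examples (the real replay entry point is not shown here):

// Run deferred initializers once the generated output is loaded (sketch).
if (jl_module_init_order != NULL) {
    for (size_t i = 0; i < jl_array_len(jl_module_init_order); i++) {
        jl_module_t *mod = (jl_module_t*)jl_cellref(jl_module_init_order, i);
        jl_module_run_initializer(mod);
    }
}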
Example #4
// load time init procedure: in build mode, only record order
void jl_module_load_time_initialize(jl_module_t *m)
{
    int build_mode = jl_generating_output();
    if (build_mode) {
        if (jl_module_init_order == NULL)
            jl_module_init_order = jl_alloc_cell_1d(0);
        jl_cell_1d_push(jl_module_init_order, (jl_value_t*)m);
        jl_function_t *f = jl_module_get_initializer(m);
        if (f) jl_get_specialization(f, (jl_tupletype_t*)jl_typeof(jl_emptytuple));
    }
    else {
        jl_module_run_initializer(m);
    }
}
Example #5
// load time init procedure: in build mode, only record order
void jl_module_load_time_initialize(jl_module_t *m)
{
    int build_mode = (jl_compileropts.build_path != NULL);
    if (build_mode) {
        if (jl_module_init_order == NULL)
            jl_module_init_order = jl_alloc_cell_1d(0);
        jl_cell_1d_push(jl_module_init_order, (jl_value_t*)m);
        jl_function_t *f = jl_module_get_initializer(m);
        if (f) jl_get_specialization(f, jl_null);
    }
    else {
        jl_module_run_initializer(m);
    }
}
Example #6
void jl_module_run_initializer(jl_module_t *m)
{
    jl_value_t *f = jl_get_global(m, jl_symbol("__init__"));
    if (f == NULL || !jl_is_function(f))
        return;
    int build_mode = (jl_compileropts.build_path != NULL);
    if (build_mode) {
        if (jl_module_init_order == NULL)
            jl_module_init_order = jl_alloc_cell_1d(0);
        jl_cell_1d_push(jl_module_init_order, (jl_value_t*)m);
    }
    JL_TRY {
        jl_apply((jl_function_t*)f, NULL, 0);
    }
    JL_CATCH {
        JL_PRINTF(JL_STDERR, "Warning: error initializing module %s:\n", m->name->name);
        jl_static_show(JL_STDERR, jl_exception_in_transit);
        JL_PRINTF(JL_STDERR, "\n");
    }
}
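Design note: the JL_TRY/JL_CATCH wrapper deliberately converts an error thrown by __init__ into a printed warning, so one broken module does not abort loading of everything that follows. From the caller's side this means the call never propagates the exception:

// Sketch: safe to call unconditionally; errors surface on JL_STDERR only.
jl_module_run_initializer(m);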
Example #7
// load time init procedure: in build mode, only record order
static void jl_module_load_time_initialize(jl_module_t *m)
{
    int build_mode = jl_generating_output();
    if (build_mode) {
        if (jl_module_init_order == NULL)
            jl_module_init_order = jl_alloc_cell_1d(0);
        jl_cell_1d_push(jl_module_init_order, (jl_value_t*)m);
        jl_function_t *f = jl_module_get_initializer(m);
        if (f) {
            jl_value_t *tt = jl_is_type(f) ? (jl_value_t*)jl_wrap_Type(f) : jl_typeof(f);
            JL_GC_PUSH1(&tt);
            tt = (jl_value_t*)jl_apply_tuple_type_v(&tt, 1);
            jl_get_specialization1((jl_tupletype_t*)tt, NULL);
            JL_GC_POP();
        }
    }
    else {
        jl_module_run_initializer(m);
    }
}
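Note how Examples 3, 4, 5, and 7 trace the evolution of the same function: build-mode detection moves from checking jl_compileropts.build_path directly to the jl_generating_output() helper, and the later variants additionally force compilation of the module's initializer, first via jl_get_specialization on an empty-tuple signature and finally via jl_get_specialization1 on an explicitly constructed one-element tuple type, with GC rooting (JL_GC_PUSH1/JL_GC_POP) around the intermediate value.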
Example #8
static jl_value_t *jl_deserialize_tag_type(ios_t *s, jl_struct_type_t *kind, int pos)
{
    if (kind == jl_struct_kind) {
        jl_struct_type_t *st =
            (jl_struct_type_t*)newobj((jl_type_t*)jl_struct_kind,
                                      STRUCT_TYPE_NW);
        st->instance = NULL;
        ptrhash_put(&backref_table, (void*)(ptrint_t)pos, st);
        st->name = (jl_typename_t*)jl_deserialize_value(s);
        st->parameters = (jl_tuple_t*)jl_deserialize_value(s);
        st->super = (jl_tag_type_t*)jl_deserialize_value(s);
        st->names = (jl_tuple_t*)jl_deserialize_value(s);
        st->types = (jl_tuple_t*)jl_deserialize_value(s);
        st->ctor_factory = jl_deserialize_value(s);
        st->env = jl_deserialize_value(s);
        st->linfo = (jl_lambda_info_t*)jl_deserialize_value(s);
        st->fptr = jl_deserialize_fptr(s);
        st->uid = read_int32(s);
        if (st->name == jl_array_type->name) {
            // builtin types are not serialized, so their caches aren't
            // explicitly saved. so we reconstruct the caches of builtin
            // parametric types here.
            jl_cell_1d_push(tagtype_list, (jl_value_t*)st);
        }
        return (jl_value_t*)st;
    }
    else if (kind == jl_bits_kind) {
        int form = read_uint8(s);
        jl_bits_type_t *bt;
        if (form == 2)
            bt = jl_int32_type;
        else if (form == 3)
            bt = jl_bool_type;
        else if (form == 4)
            bt = jl_int64_type;
        else
            bt = (jl_bits_type_t*)newobj((jl_type_t*)jl_bits_kind,
                                         BITS_TYPE_NW);
        ptrhash_put(&backref_table, (void*)(ptrint_t)pos, bt);
        bt->name = (jl_typename_t*)jl_deserialize_value(s);
        bt->parameters = (jl_tuple_t*)jl_deserialize_value(s);

        size_t nbits = read_int32(s);
        bt->nbits = nbits;
        bt->bnbits = jl_box_int32(nbits);
        bt->fptr = NULL;
        bt->env = NULL;
        bt->linfo = NULL;
        bt->super = (jl_tag_type_t*)jl_deserialize_value(s);
        bt->uid = read_int32(s);
        if (bt->name == jl_pointer_type->name) {
            jl_cell_1d_push(tagtype_list, (jl_value_t*)bt);
        }
        return (jl_value_t*)bt;
    }
    else {
        assert(kind == jl_tag_kind);
        jl_tag_type_t *tt =
            (jl_tag_type_t*)newobj((jl_type_t*)jl_tag_kind, TAG_TYPE_NW);
        ptrhash_put(&backref_table, (void*)(ptrint_t)pos, tt);
        tt->name = (jl_typename_t*)jl_deserialize_value(s);
        tt->parameters = (jl_tuple_t*)jl_deserialize_value(s);
        tt->super = (jl_tag_type_t*)jl_deserialize_value(s);
        tt->fptr = NULL;
        tt->env = NULL;
        tt->linfo = NULL;
        if (tt->name == jl_type_type->name || tt->name == jl_seq_type->name ||
            tt->name == jl_abstractarray_type->name) {
            jl_cell_1d_push(tagtype_list, (jl_value_t*)tt);
        }
        return (jl_value_t*)tt;
    }
    assert(0);
    return NULL;
}
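One detail worth calling out in the deserializer above: in every branch, ptrhash_put registers the object in backref_table before its fields are read. A field that transitively contains a backref to this very stream position therefore resolves to the partially constructed object instead of recursing, which is what makes cyclic type graphs (e.g. a type whose field types mention the type itself) deserializable. Sketch of the resulting flow:

// pos=12: struct kind tag read          -> backref_table[12] = st (fields unset)
// ...     the stream for st->types contains a backref to pos 12
//         -> lookup returns st itself   -> cycle closed without recursion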
Example #9
static void jl_serialize_value_(ios_t *s, jl_value_t *v)
{
    if (v == NULL) {
        write_uint8(s, Null_tag);
        return;
    }

    void **bp = ptrhash_bp(&ser_tag, v);
    if (*bp != HT_NOTFOUND) {
        write_as_tag(s, (uint8_t)(ptrint_t)*bp);
        return;
    }

    if (tree_literal_values) {
        // compressing tree
        if (!is_ast_node(v)) {
            writetag(s, (jl_value_t*)LiteralVal_tag);
            write_uint16(s, literal_val_id(v));
            return;
        }
    }
    else {
        bp = ptrhash_bp(&backref_table, v);
        if (*bp != HT_NOTFOUND) {
            write_uint8(s, BackRef_tag);
            write_int32(s, (ptrint_t)*bp);
            return;
        }
        ptrhash_put(&backref_table, v, (void*)(ptrint_t)ios_pos(s));
    }

    size_t i;
    if (jl_is_tuple(v)) {
        size_t l = ((jl_tuple_t*)v)->length;
        if (l <= 255) {
            writetag(s, jl_tuple_type);
            write_uint8(s, (uint8_t)l);
        }
        else {
            writetag(s, (jl_value_t*)LongTuple_tag);
            write_int32(s, l);
        }
        for(i=0; i < l; i++) {
            jl_serialize_value(s, jl_tupleref(v, i));
        }
    }
    else if (jl_is_symbol(v)) {
        size_t l = strlen(((jl_sym_t*)v)->name);
        if (l <= 255) {
            writetag(s, jl_symbol_type);
            write_uint8(s, (uint8_t)l);
        }
        else {
            writetag(s, (jl_value_t*)LongSymbol_tag);
            write_int32(s, l);
        }
        ios_write(s, ((jl_sym_t*)v)->name, l);
    }
    else if (jl_is_array(v)) {
        jl_array_t *ar = (jl_array_t*)v;
        writetag(s, (jl_value_t*)jl_array_type);
        jl_serialize_value(s, ar->type);
        jl_value_t *elty = jl_tparam0(ar->type);
        for (i=0; i < ar->ndims; i++)
            jl_serialize_value(s, jl_box_long(jl_array_dim(ar,i)));
        if (jl_is_bits_type(elty)) {
            size_t tot = ar->length * ar->elsize;
            ios_write(s, ar->data, tot);
        }
        else {
            for(i=0; i < ar->length; i++) {
                jl_serialize_value(s, jl_cellref(v, i));
            }
        }
    }
    else if (jl_is_expr(v)) {
        jl_expr_t *e = (jl_expr_t*)v;
        size_t l = e->args->length;
        if (l <= 255) {
            writetag(s, jl_expr_type);
            write_uint8(s, (uint8_t)l);
        }
        else {
            writetag(s, (jl_value_t*)LongExpr_tag);
            write_int32(s, l);
        }
        jl_serialize_value(s, e->head);
        jl_serialize_value(s, e->etype);
        for(i=0; i < l; i++) {
            jl_serialize_value(s, jl_exprarg(e, i));
        }
    }
    else if (jl_is_some_tag_type(v)) {
        jl_serialize_tag_type(s, v);
    }
    else if (jl_is_typevar(v)) {
        writetag(s, jl_tvar_type);
        jl_serialize_value(s, ((jl_tvar_t*)v)->name);
        jl_serialize_value(s, ((jl_tvar_t*)v)->lb);
        jl_serialize_value(s, ((jl_tvar_t*)v)->ub);
        write_int8(s, ((jl_tvar_t*)v)->bound);
    }
    else if (jl_is_function(v)) {
        writetag(s, jl_func_kind);
        jl_serialize_value(s, v->type);
        jl_function_t *f = (jl_function_t*)v;
        jl_serialize_value(s, (jl_value_t*)f->linfo);
        jl_serialize_value(s, f->env);
        if (f->linfo && f->linfo->ast &&
            (jl_is_expr(f->linfo->ast) || jl_is_tuple(f->linfo->ast)) &&
            f->fptr != &jl_trampoline) {
            write_int32(s, 0);
        }
        else {
            jl_serialize_fptr(s, f->fptr);
        }
    }
    else if (jl_is_lambda_info(v)) {
        writetag(s, jl_lambda_info_type);
        jl_lambda_info_t *li = (jl_lambda_info_t*)v;
        jl_serialize_value(s, li->ast);
        jl_serialize_value(s, (jl_value_t*)li->sparams);
        // don't save cached type info for code in the Base module, because
        // it might reference types in the old System module.
        if (li->module == jl_base_module)
            jl_serialize_value(s, (jl_value_t*)jl_null);
        else
            jl_serialize_value(s, (jl_value_t*)li->tfunc);
        jl_serialize_value(s, (jl_value_t*)li->name);
        jl_serialize_value(s, (jl_value_t*)li->specTypes);
        jl_serialize_value(s, (jl_value_t*)li->specializations);
        jl_serialize_value(s, (jl_value_t*)li->inferred);
        jl_serialize_value(s, (jl_value_t*)li->file);
        jl_serialize_value(s, (jl_value_t*)li->line);
        jl_serialize_value(s, (jl_value_t*)li->module);
    }
    else if (jl_typeis(v, jl_module_type)) {
        jl_serialize_module(s, (jl_module_t*)v);
    }
    else if (jl_typeis(v, jl_methtable_type)) {
        writetag(s, jl_methtable_type);
        jl_methtable_t *mt = (jl_methtable_t*)v;
        jl_serialize_methlist(s, mt->defs);
        jl_serialize_methlist(s, mt->cache);
        jl_serialize_value(s, mt->cache_1arg);
        write_int32(s, mt->max_args);
    }
    else if (jl_typeis(v, jl_task_type)) {
        jl_error("Task cannot be serialized");
    }
    else {
        jl_value_t *t = (jl_value_t*)jl_typeof(v);
        if (jl_is_bits_type(t)) {
            void *data = jl_bits_data(v);
            if (t == (jl_value_t*)jl_int64_type &&
                *(int64_t*)data >= S32_MIN && *(int64_t*)data <= S32_MAX) {
                writetag(s, (jl_value_t*)SmallInt64_tag);
                write_int32(s, (int32_t)*(int64_t*)data);
            }
            else {
                int nb = ((jl_bits_type_t*)t)->nbits;
                writetag(s, jl_bits_kind);
                jl_serialize_value(s, t);
                ios_write(s, data, nb/8);
            }
        }
        else if (jl_is_struct_type(t)) {
            writetag(s, jl_struct_kind);
            jl_serialize_value(s, t);
            size_t nf = ((jl_struct_type_t*)t)->names->length;
            size_t i;
            for(i=0; i < nf; i++) {
                jl_value_t *fld = ((jl_value_t**)v)[i+1];
                jl_serialize_value(s, fld);
            }
            if (t == jl_idtable_type) {
                jl_cell_1d_push(idtable_list, v);
            }
        }
        else {
            assert(0);
        }
    }
}
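The serializer tries progressively heavier encodings; summarizing the dispatch above as a sketch:

// 1. ser_tag hit                 -> a single tag byte (write_as_tag)
// 2. AST-compression mode and
//    value is not an AST node    -> LiteralVal_tag + 16-bit literal id
// 3. value already written       -> BackRef_tag + 32-bit stream position
// 4. otherwise                   -> a type-specific record (tuple, symbol,
//                                   array, expr, function, bits data, ...),
//    recording the current stream position first so later occurrences
//    can be written as backrefs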
Example #10
File: gf.c  Project: cshen/julia
static jl_function_t *cache_method(jl_methtable_t *mt, jl_tuple_t *type,
                                   jl_function_t *method, jl_tuple_t *decl,
                                   jl_tuple_t *sparams)
{
    size_t i;
    int need_dummy_entries = 0;
    jl_value_t *temp=NULL;
    jl_function_t *newmeth=NULL;
    JL_GC_PUSH(&type, &temp, &newmeth);

    for (i=0; i < type->length; i++) {
        jl_value_t *elt = jl_tupleref(type,i);
        int set_to_any = 0;
        if (nth_slot_type(decl,i) == jl_ANY_flag) {
            // don't specialize on slots marked ANY
            temp = jl_tupleref(type, i);
            jl_tupleset(type, i, (jl_value_t*)jl_any_type);
            int nintr=0;
            jl_methlist_t *curr = mt->defs;
            // if this method is the only match even with the current slot
            // set to Any, then it is safe to cache it that way.
            while (curr != NULL && curr->func!=method) {
                if (jl_type_intersection((jl_value_t*)curr->sig,
                                         (jl_value_t*)type) !=
                    (jl_value_t*)jl_bottom_type) {
                    nintr++;
                    break;
                }
                curr = curr->next;
            }
            if (nintr) {
                // TODO: even if different specializations of this slot need
                // separate cache entries, have them share code.
                jl_tupleset(type, i, temp);
            }
            else {
                set_to_any = 1;
            }
        }
        if (set_to_any) {
        }
        else if (jl_is_tuple(elt)) {
            /*
              don't cache tuple type exactly; just remember that it was
              a tuple, unless the declaration asks for something more
              specific. determined with a type intersection.
            */
            int might_need_dummy=0;
            temp = jl_tupleref(type, i);
            if (i < decl->length) {
                jl_value_t *declt = jl_tupleref(decl,i);
                // for T..., intersect with T
                if (jl_is_seq_type(declt))
                    declt = jl_tparam0(declt);
                if (declt == (jl_value_t*)jl_tuple_type ||
                    jl_subtype((jl_value_t*)jl_tuple_type, declt, 0)) {
                    // don't specialize args that matched (Any...) or Any
                    jl_tupleset(type, i, (jl_value_t*)jl_tuple_type);
                    might_need_dummy = 1;
                }
                else {
                    declt = jl_type_intersection(declt,
                                                 (jl_value_t*)jl_tuple_type);
                    if (((jl_tuple_t*)elt)->length > 3 ||
                        tuple_all_Any((jl_tuple_t*)declt)) {
                        jl_tupleset(type, i, declt);
                        might_need_dummy = 1;
                    }
                }
            }
            else {
                jl_tupleset(type, i, (jl_value_t*)jl_tuple_type);
                might_need_dummy = 1;
            }
            assert(jl_tupleref(type,i) != (jl_value_t*)jl_bottom_type);
            if (might_need_dummy) {
                jl_methlist_t *curr = mt->defs;
                // can't generalize type if there's an overlapping definition
                // with typevars
                while (curr != NULL && curr->func!=method) {
                    if (curr->tvars!=jl_null &&
                        jl_type_intersection((jl_value_t*)curr->sig,
                                             (jl_value_t*)type) !=
                        (jl_value_t*)jl_bottom_type) {
                        jl_tupleset(type, i, temp);
                        might_need_dummy = 0;
                        break;
                    }
                    curr = curr->next;
                }
            }
            if (might_need_dummy) {
                jl_methlist_t *curr = mt->defs;
                while (curr != NULL && curr->func!=method) {
                    jl_tuple_t *sig = curr->sig;
                    if (sig->length > i &&
                        jl_is_tuple(jl_tupleref(sig,i))) {
                        need_dummy_entries = 1;
                        break;
                    }
                    curr = curr->next;
                }
            }
        }
        else if (jl_is_type_type(elt) && jl_is_type_type(jl_tparam0(elt))) {
            /*
              actual argument was Type{...}, we computed its type as
              Type{Type{...}}. we must avoid unbounded nesting here, so
              cache the signature as Type{T}, unless something more
              specific like Type{Type{Int32}} was actually declared.
              this can be determined using a type intersection.
            */
            if (i < decl->length) {
                jl_value_t *declt = jl_tupleref(decl,i);
                // for T..., intersect with T
                if (jl_is_seq_type(declt))
                    declt = jl_tparam0(declt);
                jl_tupleset(type, i,
                            jl_type_intersection(declt, (jl_value_t*)jl_typetype_type));
            }
            else {
                jl_tupleset(type, i, (jl_value_t*)jl_typetype_type);
            }
            assert(jl_tupleref(type,i) != (jl_value_t*)jl_bottom_type);
        }
        else if (jl_is_type_type(elt) &&
                 very_general_type(nth_slot_type(decl,i))) {
            /*
              here's a fairly complex heuristic: if this argument slot's
              declared type is Any, and no definition overlaps with Type
              for this slot, then don't specialize for every Type that
              might be passed.
              Since every type x has its own type Type{x}, this would be
              excessive specialization for an Any slot.
            */
            int ok=1;
            jl_methlist_t *curr = mt->defs;
            while (curr != NULL) {
                jl_value_t *slottype = nth_slot_type(curr->sig, i);
                if (slottype &&
                    !very_general_type(slottype) &&
                    jl_type_intersection(slottype,
                                         (jl_value_t*)jl_type_type) !=
                    (jl_value_t*)jl_bottom_type) {
                    ok=0;
                    break;
                }
                curr = curr->next;
            }
            if (ok) {
                jl_tupleset(type, i, (jl_value_t*)jl_typetype_type);
            }
        }
    }

    // for varargs methods, only specialize up to max_args.
    // in general, here we want to find the biggest type that's not a
    // supertype of any other method signatures. so far we are conservative
    // and the types we find should be bigger.
    if (type->length > jl_unbox_long(mt->max_args) &&
        jl_is_seq_type(jl_tupleref(decl,decl->length-1))) {
        size_t nspec = jl_unbox_long(mt->max_args)+2;
        jl_tuple_t *limited = jl_alloc_tuple(nspec);
        for(i=0; i < nspec-1; i++) {
            jl_tupleset(limited, i, jl_tupleref(type, i));
        }
        jl_value_t *lasttype = jl_tupleref(type,i-1);
        // if all subsequent arguments are subtypes of lasttype, specialize
        // on that instead of decl. for example, if decl is
        // (Any...)
        // and type is
        // (Symbol, Symbol, Symbol)
        // then specialize as (Symbol...), but if type is
        // (Symbol, Int32, Expr)
        // then specialize as (Any...)
        size_t j = i;
        int all_are_subtypes=1;
        for(; j < type->length; j++) {
            if (!jl_subtype(jl_tupleref(type,j), lasttype, 0)) {
                all_are_subtypes = 0;
                break;
            }
        }
        type = limited;
        if (all_are_subtypes) {
            // avoid Type{Type{...}...}...
            if (jl_is_type_type(lasttype))
                lasttype = (jl_value_t*)jl_type_type;
            temp = (jl_value_t*)jl_tuple1(lasttype);
            jl_tupleset(type, i, jl_apply_type((jl_value_t*)jl_seq_type,
                                               (jl_tuple_t*)temp));
        }
        else {
            jl_value_t *lastdeclt = jl_tupleref(decl,decl->length-1);
            if (sparams->length > 0) {
                lastdeclt = (jl_value_t*)
                    jl_instantiate_type_with((jl_type_t*)lastdeclt,
                                             sparams->data,
                                             sparams->length/2);
            }
            jl_tupleset(type, i, lastdeclt);
        }
        // now there is a problem: the computed signature is more
        // general than just the given arguments, so it might conflict
        // with another definition that doesn't have cache instances yet.
        // to fix this, we insert dummy cache entries for all intersections
        // of this signature and definitions. those dummy entries will
        // supersede this one in conflicted cases, alerting us that there
        // should actually be a cache miss.
        need_dummy_entries = 1;
    }

    if (need_dummy_entries) {
        temp = ml_matches(mt->defs, (jl_value_t*)type, lambda_sym, -1);
        for(i=0; i < jl_array_len(temp); i++) {
            jl_value_t *m = jl_cellref(temp, i);
            if (jl_tupleref(m,2) != (jl_value_t*)method->linfo) {
                jl_method_cache_insert(mt, (jl_tuple_t*)jl_tupleref(m, 0),
                                       NULL);
            }
        }
    }

    // here we infer types and specialize the method
    /*
    if (sparams==jl_null)
        newmeth = method;
    else
    */
    jl_array_t *lilist=NULL;
    jl_lambda_info_t *li=NULL;
    if (method->linfo && method->linfo->specializations!=NULL) {
        // reuse code already generated for this combination of lambda and
        // arguments types. this happens for inner generic functions where
        // a new closure is generated on each call to the enclosing function.
        lilist = method->linfo->specializations;
        int k;
        for(k=0; k < lilist->length; k++) {
            li = (jl_lambda_info_t*)jl_cellref(lilist, k);
            if (jl_types_equal(li->specTypes, (jl_value_t*)type))
                break;
        }
        if (k == lilist->length) lilist=NULL;
    }
    if (lilist != NULL && !li->inInference) {
        assert(li);
        newmeth = jl_reinstantiate_method(method, li);
        (void)jl_method_cache_insert(mt, type, newmeth);
        JL_GC_POP();
        return newmeth;
    }
    else {
        newmeth = jl_instantiate_method(method, sparams);
    }
    /*
      if "method" itself can ever be compiled, for example for use as
      an unspecialized method (see below), then newmeth->fptr might point
      to some slow compiled code instead of jl_trampoline, meaning our
      type-inferred code would never get compiled. this can be fixed with
      the commented-out snippet below.
    */
    assert(!(newmeth->linfo && newmeth->linfo->ast) ||
           newmeth->fptr == &jl_trampoline);
    /*
    if (newmeth->linfo&&newmeth->linfo->ast&&newmeth->fptr!=&jl_trampoline) {
        newmeth->fptr = &jl_trampoline;
    }
    */

    (void)jl_method_cache_insert(mt, type, newmeth);

    if (newmeth->linfo != NULL && newmeth->linfo->sparams == jl_null) {
        // when there are no static parameters, one unspecialized version
        // of a function can be shared among all cached specializations.
        if (method->linfo->unspecialized == NULL) {
            method->linfo->unspecialized =
                jl_instantiate_method(method, jl_null);
        }
        newmeth->linfo->unspecialized = method->linfo->unspecialized;
    }

    if (newmeth->linfo != NULL && newmeth->linfo->ast != NULL) {
        newmeth->linfo->specTypes = (jl_value_t*)type;
        jl_array_t *spe = method->linfo->specializations;
        if (spe == NULL) {
            spe = jl_alloc_cell_1d(1);
            jl_cellset(spe, 0, newmeth->linfo);
        }
        else {
            jl_cell_1d_push(spe, (jl_value_t*)newmeth->linfo);
        }
        method->linfo->specializations = spe;
        jl_type_infer(newmeth->linfo, type, method->linfo);
    }
    JL_GC_POP();
    return newmeth;
}
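cache_method is the longest example here because it encodes several cache-widening heuristics; a compressed summary of the pipeline above, as a sketch:

// per argument slot:
//   - slot declared ANY           -> cache as Any, unless another method's
//                                    signature intersects (then keep exact)
//   - tuple-typed argument        -> widen toward the declared tuple type,
//                                    guarded against overlapping typevar defs
//   - Type{Type{...}} argument    -> cap at Type{T} to stop unbounded nesting
//   - Type{...} into an Any slot  -> widen to Type{T} if no definition
//                                    specifically matches Type there
// varargs tail:
//   - truncate to max_args+2 slots, folding the tail into T... when all
//     remaining argument types are subtypes of the last one
// conflicts:
//   - insert NULL "dummy" cache entries for intersecting definitions so the
//     widened signature still misses when a more specific method applies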