Example #1
// calls fptr on each jl_typemap_entry_t in cache in sort order
// for which type ∩ ml->type != Union{}, until fptr returns false
static int jl_typemap_intersection_node_visitor(jl_typemap_entry_t *ml, struct typemap_intersection_env *closure)
{
    // slow-path scan everything in ml
    // mark this `register` because (for branch prediction)
    // that can be absolutely critical for speed
    register jl_typemap_intersection_visitor_fptr fptr = closure->fptr;
    while (ml != (void*)jl_nothing) {
        if (closure->type == (jl_value_t*)ml->sig) {
            // fast-path for the intersection of a type with itself
            if (closure->env)
                closure->env = ml->tvars;
            closure->ti = closure->type;
            if (!fptr(ml, closure))
                return 0;
        }
        else {
            jl_value_t *ti;
            if (closure->env) {
                closure->env = jl_emptysvec;
                ti = jl_lookup_match(closure->type, (jl_value_t*)ml->sig, &closure->env, ml->tvars);
            }
            else {
                ti = jl_type_intersection(closure->type, (jl_value_t*)ml->sig);
            }
            if (ti != (jl_value_t*)jl_bottom_type) {
                closure->ti = ti;
                if (!fptr(ml, closure))
                    return 0;
            }
        }
        ml = ml->next;
    }
    return 1;
}
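For context, `closure` points at the struct typemap_intersection_env declared in julia_internal.h; the layout sketched below matches the fields the visitor reads and writes (fptr, type, va, ti, env), though qualifiers may differ between versions. The embedding struct and the counting callback are illustrative sketches of the conventional usage pattern, not code from the source:

// Field layout as exercised by the visitor above (sketch):
struct typemap_intersection_env {
    // input values
    jl_typemap_intersection_visitor_fptr fptr; // called on each match
    jl_value_t *type; // the query type
    jl_value_t *va;   // vararg element type of the query, or NULL
    // output values
    jl_value_t *ti;   // intersection with the current entry's signature
    jl_svec_t *env;   // typevar bindings (non-NULL requests matching)
};

// Hypothetical caller state: the env must be the first member so the
// closure pointer can be cast back to the enclosing struct.
struct counting_env {
    struct typemap_intersection_env match;
    int count;
};

static int count_matches(jl_typemap_entry_t *ml, struct typemap_intersection_env *closure)
{
    struct counting_env *env = (struct counting_env*)closure;
    env->count++; // closure->ti holds the non-empty intersection here
    return 1;     // nonzero continues the walk; 0 stops it
}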
Example #2
int jl_typemap_intersection_visitor(union jl_typemap_t map, int offs,
        struct typemap_intersection_env *closure)
{
    if (jl_typeof(map.unknown) == (jl_value_t*)jl_typemap_level_type) {
        jl_typemap_level_t *cache = map.node;
        jl_value_t *ty = NULL;
        size_t l = jl_datatype_nfields(closure->type);
        if (closure->va && l <= offs + 1) {
            ty = closure->va;
        }
        else if (l > offs) {
            ty = jl_tparam(closure->type, offs);
        }
        if (ty) {
            if (cache->targ != (void*)jl_nothing) {
                jl_value_t *typetype = jl_is_type_type(ty) ? jl_tparam0(ty) : NULL;
                if (typetype && !jl_has_typevars(typetype)) {
                    if (is_cache_leaf(typetype)) {
                        // direct lookup of leaf types
                        union jl_typemap_t ml = mtcache_hash_lookup(cache->targ, typetype, 1, offs);
                        if (ml.unknown != jl_nothing) {
                            if (!jl_typemap_intersection_visitor(ml, offs+1, closure)) return 0;
                        }
                    }
                }
                else {
                    // else an array scan is required to check subtypes
                    // first, fast-path: optimized pre-intersection test to see if `ty` could intersect with any Type
                    if (typetype || jl_type_intersection((jl_value_t*)jl_type_type, ty) != jl_bottom_type)
                        if (!jl_typemap_intersection_array_visitor(cache->targ, ty, 1, offs, closure)) return 0;
                }
            }
            if (cache->arg1 != (void*)jl_nothing) {
                if (is_cache_leaf(ty)) {
                    // direct lookup of leaf types
                    union jl_typemap_t ml = mtcache_hash_lookup(cache->arg1, ty, 0, offs);
                    if (ml.unknown != jl_nothing) {
                        if (!jl_typemap_intersection_visitor(ml, offs+1, closure)) return 0;
                    }
                }
                else {
                    // else an array scan is required to check subtypes
                    if (!jl_typemap_intersection_array_visitor(cache->arg1, ty, 0, offs, closure)) return 0;
                }
            }
        }
        if (!jl_typemap_intersection_node_visitor(map.node->linear, closure))
            return 0;
        if (ty)
            return jl_typemap_intersection_visitor(map.node->any, offs+1, closure);
        return 1;
    }
    else {
        return jl_typemap_intersection_node_visitor(map.leaf, closure);
    }
}
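Putting the two pieces together, a hedged sketch of driving this visitor from the top of a method table's typemap, reusing the hypothetical counting_env defined after Example #1 (starting at offs 0 over mt->defs is modeled on how gf.c invokes it):

// Count the definitions whose signatures could intersect `query` (sketch).
static int count_intersecting(jl_methtable_t *mt, jl_value_t *query)
{
    struct counting_env env = {{count_matches, query, /*va*/NULL,
                                /*ti*/NULL, /*env*/NULL}, /*count*/0};
    // env.match.env == NULL selects plain jl_type_intersection in the
    // node visitor; jl_emptysvec would request typevar bindings instead.
    (void)jl_typemap_intersection_visitor(mt->defs, /*offs*/0, &env.match);
    return env.count;
}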
Example #3
File: gf.c Project: cshen/julia
/*
  warn about ambiguous method priorities
  
  the relative priority of A and B is ambiguous if
  !subtype(A,B) && !subtype(B,A) && no corresponding tuple
  elements are disjoint.
  
  for example, (AbstractArray, AbstractMatrix) and (AbstractMatrix, AbstractArray) are ambiguous.
  however, (AbstractArray, AbstractMatrix, Foo) and (AbstractMatrix, AbstractArray, Bar) are fine
  since Foo and Bar are disjoint, so there would be no confusion over
  which one to call.
  
  There is also this kind of ambiguity: foo{T,S}(T, S) vs. foo(Any,Any)
  In this case jl_types_equal() is true, but one is jl_type_morespecific
  or jl_type_match_morespecific than the other.
  To check this, jl_types_equal_generic needs to be more sophisticated
  so (T,T) is not equivalent to (Any,Any). (TODO)
*/
static void check_ambiguous(jl_methlist_t *ml, jl_tuple_t *type,
                            jl_tuple_t *sig, jl_sym_t *fname)
{
    // we know !jl_args_morespecific(type, sig)
    if ((type->length==sig->length ||
         (type->length==sig->length+1 && is_va_tuple(type)) ||
         (type->length+1==sig->length && is_va_tuple(sig))) &&
        !jl_args_morespecific((jl_value_t*)sig, (jl_value_t*)type)) {
        jl_value_t *isect = jl_type_intersection((jl_value_t*)type,
                                                 (jl_value_t*)sig);
        if (isect == (jl_value_t*)jl_bottom_type)
            return;
        JL_GC_PUSH(&isect);
        jl_methlist_t *l = ml;
        while (l != NULL) {
            if (sigs_eq(isect, (jl_value_t*)l->sig))
                goto done_chk_amb;  // ok, intersection is covered
            l = l->next;
        }
        char *n = fname->name;
        jl_value_t *errstream = jl_get_global(jl_system_module,
                                              jl_symbol("stderr_stream"));
        JL_TRY {
            if (errstream)
                jl_set_current_output_stream_obj(errstream);
            ios_t *s = jl_current_output_stream();
            ios_printf(s, "Warning: New definition %s", n);
            jl_show((jl_value_t*)type);
            ios_printf(s, " is ambiguous with %s", n);
            jl_show((jl_value_t*)sig);
            ios_printf(s, ".\n         Make sure %s", n);
            jl_show(isect);
            ios_printf(s, " is defined first.\n");
        }
        JL_CATCH {
            jl_raise(jl_exception_in_transit);
        }
    done_chk_amb:
        JL_GC_POP();
    }
}
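The comment at the top of the body records the caller's precondition: check_ambiguous only runs once the new signature is known not to be more specific than the existing one. A sketch of such a call site, loosely modeled on the insertion loop in this era's gf.c (the helper name is illustrative):

// Illustrative only: compare a new signature against every existing
// definition and warn wherever neither direction is more specific.
static void warn_all_ambiguities(jl_methlist_t *defs, jl_tuple_t *newsig,
                                 jl_sym_t *fname)
{
    jl_methlist_t *l = defs;
    while (l != NULL) {
        if (!jl_args_morespecific((jl_value_t*)newsig, (jl_value_t*)l->sig))
            check_ambiguous(defs, newsig, l->sig, fname);
        l = l->next;
    }
}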
Example #4
File: gf.c Project: cshen/julia
static jl_function_t *cache_method(jl_methtable_t *mt, jl_tuple_t *type,
                                   jl_function_t *method, jl_tuple_t *decl,
                                   jl_tuple_t *sparams)
{
    size_t i;
    int need_dummy_entries = 0;
    jl_value_t *temp=NULL;
    jl_function_t *newmeth=NULL;
    JL_GC_PUSH(&type, &temp, &newmeth);

    for (i=0; i < type->length; i++) {
        jl_value_t *elt = jl_tupleref(type,i);
        int set_to_any = 0;
        if (nth_slot_type(decl,i) == jl_ANY_flag) {
            // don't specialize on slots marked ANY
            temp = jl_tupleref(type, i);
            jl_tupleset(type, i, (jl_value_t*)jl_any_type);
            int nintr=0;
            jl_methlist_t *curr = mt->defs;
            // if this method is the only match even with the current slot
            // set to Any, then it is safe to cache it that way.
            while (curr != NULL && curr->func!=method) {
                if (jl_type_intersection((jl_value_t*)curr->sig,
                                         (jl_value_t*)type) !=
                    (jl_value_t*)jl_bottom_type) {
                    nintr++;
                    break;
                }
                curr = curr->next;
            }
            if (nintr) {
                // TODO: even if different specializations of this slot need
                // separate cache entries, have them share code.
                jl_tupleset(type, i, temp);
            }
            else {
                set_to_any = 1;
            }
        }
        if (set_to_any) {
            // nothing more to do; the slot was already widened to Any above
        }
        else if (jl_is_tuple(elt)) {
            /*
              don't cache tuple type exactly; just remember that it was
              a tuple, unless the declaration asks for something more
              specific. determined with a type intersection.
            */
            int might_need_dummy=0;
            temp = jl_tupleref(type, i);
            if (i < decl->length) {
                jl_value_t *declt = jl_tupleref(decl,i);
                // for T..., intersect with T
                if (jl_is_seq_type(declt))
                    declt = jl_tparam0(declt);
                if (declt == (jl_value_t*)jl_tuple_type ||
                    jl_subtype((jl_value_t*)jl_tuple_type, declt, 0)) {
                    // don't specialize args that matched (Any...) or Any
                    jl_tupleset(type, i, (jl_value_t*)jl_tuple_type);
                    might_need_dummy = 1;
                }
                else {
                    declt = jl_type_intersection(declt,
                                                 (jl_value_t*)jl_tuple_type);
                    if (((jl_tuple_t*)elt)->length > 3 ||
                        tuple_all_Any((jl_tuple_t*)declt)) {
                        jl_tupleset(type, i, declt);
                        might_need_dummy = 1;
                    }
                }
            }
            else {
                jl_tupleset(type, i, (jl_value_t*)jl_tuple_type);
                might_need_dummy = 1;
            }
            assert(jl_tupleref(type,i) != (jl_value_t*)jl_bottom_type);
            if (might_need_dummy) {
                jl_methlist_t *curr = mt->defs;
                // can't generalize type if there's an overlapping definition
                // with typevars
                while (curr != NULL && curr->func!=method) {
                    if (curr->tvars!=jl_null &&
                        jl_type_intersection((jl_value_t*)curr->sig,
                                             (jl_value_t*)type) !=
                        (jl_value_t*)jl_bottom_type) {
                        jl_tupleset(type, i, temp);
                        might_need_dummy = 0;
                        break;
                    }
                    curr = curr->next;
                }
            }
            if (might_need_dummy) {
                jl_methlist_t *curr = mt->defs;
                while (curr != NULL && curr->func!=method) {
                    jl_tuple_t *sig = curr->sig;
                    if (sig->length > i &&
                        jl_is_tuple(jl_tupleref(sig,i))) {
                        need_dummy_entries = 1;
                        break;
                    }
                    curr = curr->next;
                }
            }
        }
        else if (jl_is_type_type(elt) && jl_is_type_type(jl_tparam0(elt))) {
            /*
              actual argument was Type{...}, we computed its type as
              Type{Type{...}}. we must avoid unbounded nesting here, so
              cache the signature as Type{T}, unless something more
              specific like Type{Type{Int32}} was actually declared.
              this can be determined using a type intersection.
            */
            if (i < decl->length) {
                jl_value_t *declt = jl_tupleref(decl,i);
                // for T..., intersect with T
                if (jl_is_seq_type(declt))
                    declt = jl_tparam0(declt);
                jl_tupleset(type, i,
                            jl_type_intersection(declt, (jl_value_t*)jl_typetype_type));
            }
            else {
                jl_tupleset(type, i, (jl_value_t*)jl_typetype_type);
            }
            assert(jl_tupleref(type,i) != (jl_value_t*)jl_bottom_type);
        }
        else if (jl_is_type_type(elt) &&
                 very_general_type(nth_slot_type(decl,i))) {
            /*
              here's a fairly complex heuristic: if this argument slot's
              declared type is Any, and no definition overlaps with Type
              for this slot, then don't specialize for every Type that
              might be passed.
              Since every type x has its own type Type{x}, this would be
              excessive specialization for an Any slot.
            */
            int ok=1;
            jl_methlist_t *curr = mt->defs;
            while (curr != NULL) {
                jl_value_t *slottype = nth_slot_type(curr->sig, i);
                if (slottype &&
                    !very_general_type(slottype) &&
                    jl_type_intersection(slottype,
                                         (jl_value_t*)jl_type_type) !=
                    (jl_value_t*)jl_bottom_type) {
                    ok=0;
                    break;
                }
                curr = curr->next;
            }
            if (ok) {
                jl_tupleset(type, i, (jl_value_t*)jl_typetype_type);
            }
        }
    }

    // for varargs methods, only specialize up to max_args.
    // in general, here we want to find the biggest type that's not a
    // supertype of any other method signatures. so far we are conservative
    // and the types we find should be bigger.
    if (type->length > jl_unbox_long(mt->max_args) &&
        jl_is_seq_type(jl_tupleref(decl,decl->length-1))) {
        size_t nspec = jl_unbox_long(mt->max_args)+2;
        jl_tuple_t *limited = jl_alloc_tuple(nspec);
        for(i=0; i < nspec-1; i++) {
            jl_tupleset(limited, i, jl_tupleref(type, i));
        }
        jl_value_t *lasttype = jl_tupleref(type,i-1); // i == nspec-1 here
        // if all subsequent arguments are subtypes of lasttype, specialize
        // on that instead of decl. for example, if decl is
        // (Any...)
        // and type is
        // (Symbol, Symbol, Symbol)
        // then specialize as (Symbol...), but if type is
        // (Symbol, Int32, Expr)
        // then specialize as (Any...)
        size_t j = i;
        int all_are_subtypes=1;
        for(; j < type->length; j++) {
            if (!jl_subtype(jl_tupleref(type,j), lasttype, 0)) {
                all_are_subtypes = 0;
                break;
            }
        }
        type = limited;
        if (all_are_subtypes) {
            // avoid Type{Type{...}...}...
            if (jl_is_type_type(lasttype))
                lasttype = (jl_value_t*)jl_type_type;
            temp = (jl_value_t*)jl_tuple1(lasttype);
            jl_tupleset(type, i, jl_apply_type((jl_value_t*)jl_seq_type,
                                               (jl_tuple_t*)temp));
        }
        else {
            jl_value_t *lastdeclt = jl_tupleref(decl,decl->length-1);
            if (sparams->length > 0) {
                lastdeclt = (jl_value_t*)
                    jl_instantiate_type_with((jl_type_t*)lastdeclt,
                                             sparams->data,
                                             sparams->length/2);
            }
            jl_tupleset(type, i, lastdeclt);
        }
        // now there is a problem: the computed signature is more
        // general than just the given arguments, so it might conflict
        // with another definition that doesn't have cache instances yet.
        // to fix this, we insert dummy cache entries for all intersections
        // of this signature and definitions. those dummy entries will
        // supersede this one in conflicted cases, alerting us that there
        // should actually be a cache miss.
        need_dummy_entries = 1;
    }

    if (need_dummy_entries) {
        temp = ml_matches(mt->defs, (jl_value_t*)type, lambda_sym, -1);
        for(i=0; i < jl_array_len(temp); i++) {
            jl_value_t *m = jl_cellref(temp, i);
            if (jl_tupleref(m,2) != (jl_value_t*)method->linfo) {
                jl_method_cache_insert(mt, (jl_tuple_t*)jl_tupleref(m, 0),
                                       NULL);
            }
        }
    }

    // here we infer types and specialize the method
    /*
    if (sparams==jl_null)
        newmeth = method;
    else
    */
    jl_array_t *lilist=NULL;
    jl_lambda_info_t *li=NULL;
    if (method->linfo && method->linfo->specializations!=NULL) {
        // reuse code already generated for this combination of lambda and
        // arguments types. this happens for inner generic functions where
        // a new closure is generated on each call to the enclosing function.
        lilist = method->linfo->specializations;
        int k;
        for(k=0; k < lilist->length; k++) {
            li = (jl_lambda_info_t*)jl_cellref(lilist, k);
            if (jl_types_equal(li->specTypes, (jl_value_t*)type))
                break;
        }
        if (k == lilist->length) lilist=NULL;
    }
    if (lilist != NULL && !li->inInference) {
        assert(li);
        newmeth = jl_reinstantiate_method(method, li);
        (void)jl_method_cache_insert(mt, type, newmeth);
        JL_GC_POP();
        return newmeth;
    }
    else {
        newmeth = jl_instantiate_method(method, sparams);
    }
    /*
      if "method" itself can ever be compiled, for example for use as
      an unspecialized method (see below), then newmeth->fptr might point
      to some slow compiled code instead of jl_trampoline, meaning our
      type-inferred code would never get compiled. this can be fixed with
      the commented-out snippet below.
    */
    assert(!(newmeth->linfo && newmeth->linfo->ast) ||
           newmeth->fptr == &jl_trampoline);
    /*
    if (newmeth->linfo&&newmeth->linfo->ast&&newmeth->fptr!=&jl_trampoline) {
        newmeth->fptr = &jl_trampoline;
    }
    */

    (void)jl_method_cache_insert(mt, type, newmeth);

    if (newmeth->linfo != NULL && newmeth->linfo->sparams == jl_null) {
        // when there are no static parameters, one unspecialized version
        // of a function can be shared among all cached specializations.
        if (method->linfo->unspecialized == NULL) {
            method->linfo->unspecialized =
                jl_instantiate_method(method, jl_null);
        }
        newmeth->linfo->unspecialized = method->linfo->unspecialized;
    }

    if (newmeth->linfo != NULL && newmeth->linfo->ast != NULL) {
        newmeth->linfo->specTypes = (jl_value_t*)type;
        jl_array_t *spe = method->linfo->specializations;
        if (spe == NULL) {
            spe = jl_alloc_cell_1d(1);
            jl_cellset(spe, 0, newmeth->linfo);
        }
        else {
            jl_cell_1d_push(spe, (jl_value_t*)newmeth->linfo);
        }
        method->linfo->specializations = spe;
        jl_type_infer(newmeth->linfo, type, method->linfo);
    }
    JL_GC_POP();
    return newmeth;
}
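cache_method leans on a few helpers that aren't shown. For reference, a plausible nth_slot_type consistent with its uses above: it returns the declared type covering argument slot i, looking through a trailing vararg (a sketch of the idea, not a verbatim quote of gf.c):

static jl_value_t *nth_slot_type(jl_tuple_t *sig, size_t i)
{
    size_t len = sig->length;
    if (len == 0)
        return NULL;
    if (i < len-1)
        return jl_tupleref(sig, i); // ordinary positional slot
    if (is_va_tuple(sig))
        return jl_tparam0(jl_tupleref(sig, len-1)); // covered by the T... tail
    if (i == len-1)
        return jl_tupleref(sig, i); // exact last slot, no vararg
    return NULL; // past the end of a non-vararg signature
}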
Example #5
// calls fptr on each jl_typemap_entry_t in cache in sort order
// for which type ∩ ml->type != Union{}, until fptr returns false
int jl_typemap_intersection_visitor(union jl_typemap_t map, int offs,
        struct typemap_intersection_env *closure)
{
    jl_typemap_entry_t *ml;
    if (jl_typeof(map.unknown) == (jl_value_t*)jl_typemap_level_type) {
        jl_typemap_level_t *cache = map.node;
        jl_value_t *ty = NULL;
        size_t l = jl_datatype_nfields(closure->type);
        if (closure->va && l == offs - 1) {
            ty = closure->va;
        }
        else if (l > offs) {
            ty = jl_tparam(closure->type, offs);
        }
        if (ty) {
            if (cache->targ != (void*)jl_nothing) {
                if (jl_is_type_type(ty) && is_cache_leaf(jl_tparam0(ty))) {
                    // direct lookup of leaf types
                    union jl_typemap_t ml = mtcache_hash_lookup(cache->targ, jl_tparam0(ty), 1, offs);
                    if (ml.unknown != jl_nothing) {
                        if (!jl_typemap_intersection_visitor(ml, offs+1, closure)) return 0;
                    }
                }
                else {
                    // else an array scan is required to check subtypes
                    // TODO: fast-path: optimized pre-intersection test
                    if (!jl_typemap_intersection_array_visitor(cache->targ, ty, 1, offs, closure)) return 0;
                }
            }
            if (cache->arg1 != (void*)jl_nothing) {
                if (is_cache_leaf(ty)) {
                    // direct lookup of leaf types
                    union jl_typemap_t ml = mtcache_hash_lookup(cache->arg1, ty, 0, offs);
                    if (ml.unknown != jl_nothing) {
                        if (!jl_typemap_intersection_visitor(ml, offs+1, closure)) return 0;
                    }
                }
                else {
                    // else an array scan is required to check subtypes
                    if (!jl_typemap_intersection_array_visitor(cache->arg1, ty, 0, offs, closure)) return 0;
                }
            }
        }
        ml = map.node->linear;
    }
    else {
        ml = map.leaf;
    }
    // slow-path scan everything else
    // mark this `register` because (for branch prediction)
    // that can be absolutely critical for speed
    register jl_typemap_intersection_visitor_fptr fptr = closure->fptr;
    while (ml != (void*)jl_nothing) {
        // TODO: optimize intersection test
        if (closure->type == (jl_value_t*)ml->sig) {
            // fast-path for the intersection of a type with itself
            if (closure->env)
                closure->env = ml->tvars;
            closure->ti = closure->type;
            if (!fptr(ml, closure))
                return 0;
        }
        else {
            jl_value_t *ti;
            if (closure->env) {
                closure->env = jl_emptysvec;
                ti = jl_lookup_match(closure->type, (jl_value_t*)ml->sig, &closure->env, ml->tvars);
            }
            else {
                ti = jl_type_intersection(closure->type, (jl_value_t*)ml->sig);
            }
            if (ti != (jl_value_t*)jl_bottom_type) {
                closure->ti = ti;
                if (!fptr(ml, closure))
                    return 0;
            }
        }
        ml = ml->next;
    }
    return 1;
}
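This version inlines the linear scan that Examples #1 and #2 factor out into jl_typemap_intersection_node_visitor. The branches above also show the two query modes a caller selects through closure->env; in terms of the hypothetical counting_env from the earlier sketches:

// Mode 1: plain intersection; only closure->ti is produced.
env.match.env = NULL;
// Mode 2: also recover typevar bindings; jl_lookup_match fills closure->env
// (the identity fast-path hands back ml->tvars directly).
// env.match.env = jl_emptysvec;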