Example #1: jl_cglobal, the runtime implementation of cglobal symbol lookup
JL_DLLEXPORT jl_value_t *jl_cglobal(jl_value_t *v, jl_value_t *ty)
{
    JL_TYPECHK(cglobal, type, ty);
    JL_GC_PUSH1(&v);
    jl_value_t *rt =
        ty == (jl_value_t*)jl_void_type ? (jl_value_t*)jl_voidpointer_type : // a common case
            (jl_value_t*)jl_apply_type1((jl_value_t*)jl_pointer_type, ty);
    JL_GC_PROMISE_ROOTED(rt); // (JL_ALWAYS_LEAFTYPE)

    if (!jl_is_concrete_type(rt))
        jl_error("cglobal: type argument not concrete");

    if (jl_is_tuple(v) && jl_nfields(v) == 1)
        v = jl_fieldref(v, 0);

    if (jl_is_pointer(v)) {
        v = jl_bitcast(rt, v);
        JL_GC_POP();
        return v;
    }

    char *f_lib = NULL;
    if (jl_is_tuple(v) && jl_nfields(v) > 1) {
        jl_value_t *t1 = jl_fieldref_noalloc(v, 1);
        v = jl_fieldref(v, 0);
        if (jl_is_symbol(t1))
            f_lib = jl_symbol_name((jl_sym_t*)t1);
        else if (jl_is_string(t1))
            f_lib = jl_string_data(t1);
        else
            JL_TYPECHK(cglobal, symbol, t1)
    }

    char *f_name = NULL;
    if (jl_is_symbol(v))
        f_name = jl_symbol_name((jl_sym_t*)v);
    else if (jl_is_string(v))
        f_name = jl_string_data(v);
    else
        JL_TYPECHK(cglobal, symbol, v)

#ifdef _OS_WINDOWS_
    if (!f_lib)
        f_lib = (char*)jl_dlfind_win32(f_name);
#endif

    // look up the symbol in the requested library (or the current process)
    // and box the resulting address with the Ptr type computed above
    void *ptr;
    jl_dlsym(jl_get_library(f_lib), f_name, &ptr, 1);
    jl_value_t *jv = jl_gc_alloc_1w();
    jl_set_typeof(jv, rt);
    *(void**)jl_data_ptr(jv) = ptr;
    JL_GC_POP();
    return jv;
}
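For context, a minimal embedding-side sketch of driving jl_cglobal from C. This is illustrative only: it assumes an initialized libjulia of the same vintage, declares the prototype by hand (jl_cglobal is exported but not necessarily visible in julia.h), and uses the exported global jl_n_threads purely as an example symbol.

#include <stdio.h>
#include <julia.h>

// Declared by hand here; see the assumption noted above.
JL_DLLEXPORT jl_value_t *jl_cglobal(jl_value_t *v, jl_value_t *ty);

int main(void)
{
    jl_init();
    // equivalent of cglobal(:jl_n_threads, Int32): resolve in the running process
    jl_value_t *p = jl_cglobal((jl_value_t*)jl_symbol("jl_n_threads"),
                               (jl_value_t*)jl_int32_type);
    void *addr = *(void**)jl_data_ptr(p); // payload of the boxed Ptr{Int32}
    printf("jl_n_threads is at %p\n", addr);
    jl_atexit_hook(0);
    return 0;
}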
Example #2: jl_bitcast, the run-time version of the bitcast intrinsic
// run time version of bitcast intrinsic
JL_DLLEXPORT jl_value_t *jl_bitcast(jl_value_t *ty, jl_value_t *v)
{
    JL_TYPECHK(bitcast, datatype, ty);
    if (!jl_is_concrete_type(ty) || !jl_is_primitivetype(ty))
        jl_error("bitcast: target type not a leaf primitive type");
    if (!jl_is_primitivetype(jl_typeof(v)))
        jl_error("bitcast: value not a primitive type");
    if (jl_datatype_size(jl_typeof(v)) != jl_datatype_size(ty))
        jl_error("bitcast: argument size does not match size of target type");
    if (ty == jl_typeof(v))
        return v;
    if (ty == (jl_value_t*)jl_bool_type)
        // mask to the low bit so the result is a canonical Bool (0 or 1)
        return *(uint8_t*)jl_data_ptr(v) & 1 ? jl_true : jl_false;
    return jl_new_bits(ty, jl_data_ptr(v));
}
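A short sketch of what the run-time bitcast does, under the same embedding assumptions as the previous sketch: the bytes are copied unchanged, so a Float32 reinterprets to the Int32 with the same bit pattern.

#include <assert.h>
#include <julia.h>

JL_DLLEXPORT jl_value_t *jl_bitcast(jl_value_t *ty, jl_value_t *v);

void bitcast_demo(void)
{
    jl_value_t *f = jl_box_float32(1.5f); // bit pattern 0x3FC00000
    jl_value_t *i = jl_bitcast((jl_value_t*)jl_int32_type, f);
    assert(jl_unbox_int32(i) == 0x3FC00000); // same bytes, new type
}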
Example #3: jl_compute_field_offsets, which computes a datatype's size, alignment, and field layout
void jl_compute_field_offsets(jl_datatype_t *st)
{
    size_t sz = 0, alignm = 1;
    int homogeneous = 1;
    jl_value_t *lastty = NULL;
    uint64_t max_offset = (((uint64_t)1) << 32) - 1; // field offsets are stored as 32-bit values
    uint64_t max_size = max_offset >> 1;

    if (st->name->wrapper) {
        jl_datatype_t *w = (jl_datatype_t*)jl_unwrap_unionall(st->name->wrapper);
        // compute whether this type can be inlined
        // based on whether its definition is self-referential
        if (w->types != NULL) {
            st->isbitstype = st->isconcretetype && !st->mutabl;
            size_t i, nf = jl_field_count(st);
            for (i = 0; i < nf; i++) {
                jl_value_t *fld = jl_field_type(st, i);
                if (st->isbitstype)
                    st->isbitstype = jl_is_datatype(fld) && ((jl_datatype_t*)fld)->isbitstype;
                if (!st->zeroinit)
                    st->zeroinit = (jl_is_datatype(fld) && ((jl_datatype_t*)fld)->isinlinealloc) ? ((jl_datatype_t*)fld)->zeroinit : 1;
            }
            if (st->isbitstype) {
                st->isinlinealloc = 1;
                size_t i, nf = jl_field_count(w);
                for (i = 0; i < nf; i++) {
                    jl_value_t *fld = jl_field_type(w, i);
                    if (references_name(fld, w->name)) {
                        st->isinlinealloc = 0;
                        st->isbitstype = 0;
                        st->zeroinit = 1;
                        break;
                    }
                }
            }
        }
        // If layout doesn't depend on type parameters, it's stored in st->name->wrapper
        // and reused by all subtypes.
        if (st != w && // this check allows us to re-compute layout for some types during init
                w->layout) {
            st->layout = w->layout;
            st->size = w->size;
            jl_allocate_singleton_instance(st);
            return;
        }
    }
    if (st->types == NULL || (jl_is_namedtuple_type(st) && !jl_is_concrete_type((jl_value_t*)st)))
        return;
    uint32_t nfields = jl_svec_len(st->types);
    if (nfields == 0) {
        if (st == jl_sym_type || st == jl_string_type) {
            // opaque layout - heap-allocated blob
            static const jl_datatype_layout_t opaque_byte_layout = {0, 1, 0, 1, 0};
            st->layout = &opaque_byte_layout;
        }
        else if (st == jl_simplevector_type || st->name == jl_array_typename) {
            static const jl_datatype_layout_t opaque_ptr_layout = {0, sizeof(void*), 0, 1, 0};
            st->layout = &opaque_ptr_layout;
        }
        else {
            // reuse the same layout for all singletons
            static const jl_datatype_layout_t singleton_layout = {0, 1, 0, 0, 0};
            st->layout = &singleton_layout;
            jl_allocate_singleton_instance(st);
        }
        return;
    }
    if (!jl_is_concrete_type((jl_value_t*)st)) {
        // compute layout whenever field types have no free variables
        for (size_t i = 0; i < nfields; i++) {
            if (jl_has_free_typevars(jl_field_type(st, i)))
                return;
        }
    }

    size_t descsz = nfields * sizeof(jl_fielddesc32_t);
    jl_fielddesc32_t *desc;
    if (descsz < jl_page_size)
        desc = (jl_fielddesc32_t*)alloca(descsz);
    else
        desc = (jl_fielddesc32_t*)malloc(descsz);
    int haspadding = 0;
    assert(st->name == jl_tuple_typename ||
           st == jl_sym_type ||
           st == jl_simplevector_type ||
           nfields != 0);

    for (size_t i = 0; i < nfields; i++) {
        jl_value_t *ty = jl_field_type(st, i);
        size_t fsz = 0, al = 0;
        if (jl_islayout_inline(ty, &fsz, &al)) {
            if (__unlikely(fsz > max_size))
                // Should never happen
                goto throw_ovf;
            desc[i].isptr = 0;
            if (jl_is_uniontype(ty)) {
                haspadding = 1;
                fsz += 1; // selector byte
            }
            else { // isbits struct
                if (((jl_datatype_t*)ty)->layout->haspadding)
                    haspadding = 1;
            }
        }
        else {
            fsz = sizeof(void*);
            if (fsz > MAX_ALIGN)
                fsz = MAX_ALIGN;
            al = fsz;
            desc[i].isptr = 1;
        }
        assert(al <= JL_HEAP_ALIGNMENT && (JL_HEAP_ALIGNMENT % al) == 0);
        if (al != 0) {
            size_t alsz = LLT_ALIGN(sz, al);
            if (sz & (al - 1))
                haspadding = 1;
            sz = alsz;
            if (al > alignm)
                alignm = al;
        }
        homogeneous &= lastty==NULL || lastty==ty;
        lastty = ty;
        desc[i].offset = sz;
        desc[i].size = fsz;
        if (__unlikely(max_offset - sz < fsz))
            goto throw_ovf;
        sz += fsz;
    }
    if (homogeneous && lastty != NULL && jl_is_tuple_type(st)) {
        // Some tuples become LLVM vectors with stronger alignment than what was calculated above.
        unsigned al = jl_special_vector_alignment(nfields, lastty);
        assert(al % alignm == 0);
        // JL_HEAP_ALIGNMENT is the biggest alignment we can guarantee on the heap.
        if (al > JL_HEAP_ALIGNMENT)
            alignm = JL_HEAP_ALIGNMENT;
        else if (al)
            alignm = al;
    }
    st->size = LLT_ALIGN(sz, alignm);
    if (st->size > sz)
        haspadding = 1;
    st->layout = jl_get_layout(nfields, alignm, haspadding, desc);
    if (descsz >= jl_page_size) free(desc);
    jl_allocate_singleton_instance(st);
    return;
 throw_ovf:
    if (descsz >= jl_page_size) free(desc);
    jl_errorf("type %s has field offset %d that exceeds the page size", jl_symbol_name(st->name->name), descsz);
}
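The heart of the field loop above is natural-alignment placement: each field offset is bumped to a multiple of the field's alignment, padding is recorded whenever the bump skips bytes, and the final size is rounded up to the struct's overall alignment. Below is a self-contained sketch with a hypothetical field list; the LLT_ALIGN definition is re-derived from its use above, assuming power-of-two alignments.

#include <stdio.h>
#include <stddef.h>

// Round x up to a multiple of the power-of-two alignment al (mirrors LLT_ALIGN above).
#define LLT_ALIGN(x, al) (((x) + (al) - 1) & ~((size_t)(al) - 1))

int main(void)
{
    // stand-in for struct { int8_t a; int64_t b; int32_t c; }
    size_t fsz[] = {1, 8, 4}, al[] = {1, 8, 4};
    size_t sz = 0, alignm = 1;
    int haspadding = 0;
    for (int i = 0; i < 3; i++) {
        if (sz & (al[i] - 1))
            haspadding = 1;              // the bump below skips bytes
        sz = LLT_ALIGN(sz, al[i]);
        printf("field %d at offset %zu\n", i, sz);
        sz += fsz[i];
        if (al[i] > alignm)
            alignm = al[i];
    }
    size_t total = LLT_ALIGN(sz, alignm); // trailing padding
    if (total > sz)
        haspadding = 1;
    printf("size=%zu align=%zu haspadding=%d\n", total, alignm, haspadding);
    return 0; // prints offsets 0, 8, 16; size=24 align=8 haspadding=1
}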
Example #4: _compile_all_tvar_union, which expands Union-bounded typevars into concrete signatures for precompilation
// f{<:Union{...}}(...) is a common pattern
// and expanding the Union may give a leaf function
static void _compile_all_tvar_union(jl_value_t *methsig)
{
    if (!jl_is_unionall(methsig) && jl_is_dispatch_tupletype(methsig)) {
        // usually can create a specialized version of the function,
        // if the signature is already a dispatch type
        if (jl_compile_hint((jl_tupletype_t*)methsig))
            return;
    }

    int tvarslen = jl_subtype_env_size(methsig);
    jl_value_t *sigbody = methsig;
    jl_value_t **env;
    JL_GC_PUSHARGS(env, 2 * tvarslen);
    int *idx = (int*)alloca(sizeof(int) * tvarslen);
    int i;
    for (i = 0; i < tvarslen; i++) {
        assert(jl_is_unionall(sigbody));
        idx[i] = 0;
        env[2 * i] = (jl_value_t*)((jl_unionall_t*)sigbody)->var;
        env[2 * i + 1] = jl_bottom_type; // initialize the list with Union{}, since T<:Union{} is always a valid option
        sigbody = ((jl_unionall_t*)sigbody)->body;
    }

    for (i = 0; i < tvarslen; /* incremented by inner loop */) {
        jl_value_t *sig;
        JL_TRY {
            // TODO: wrap in UnionAll for each tvar in env[2*i + 1] ?
            // currently doesn't matter much, since jl_compile_hint doesn't work on abstract types
            sig = (jl_value_t*)jl_instantiate_type_with(sigbody, env, tvarslen);
        }
        JL_CATCH {
            goto getnext; // sigh, we found an invalid type signature. should we warn the user?
        }
        if (!jl_has_concrete_subtype(sig))
            goto getnext; // signature wouldn't be callable / is invalid -- skip it
        if (jl_is_concrete_type(sig)) {
            JL_GC_PROMISE_ROOTED(sig); // `sig` is rooted because it's a leaftype (JL_ALWAYS_LEAFTYPE)
            if (jl_compile_hint((jl_tupletype_t*)sig))
                goto getnext; // success
        }

    getnext:
        for (i = 0; i < tvarslen; i++) {
            jl_tvar_t *tv = (jl_tvar_t*)env[2 * i];
            if (jl_is_uniontype(tv->ub)) {
                size_t l = jl_count_union_components(tv->ub);
                size_t j = idx[i];
                if (j == l) {
                    env[2 * i + 1] = jl_bottom_type;
                    idx[i] = 0;
                }
                else {
                    jl_value_t *ty = jl_nth_union_component(tv->ub, j);
                    if (!jl_is_concrete_type(ty))
                        ty = (jl_value_t*)jl_new_typevar(tv->name, tv->lb, ty);
                    env[2 * i + 1] = ty;
                    idx[i] = j + 1;
                    break;
                }
            }
            else {
                env[2 * i + 1] = (jl_value_t*)tv;
            }
        }
    }
    JL_GC_POP();
}
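The getnext block implements an odometer over the per-tvar component lists: advance the first digit that can still advance, reset every digit that wrapped, and stop once all digits wrap in the same pass. A simplified, self-contained stand-in (plain ints in place of union components and the env[]/idx[] bookkeeping):

#include <stdio.h>

int main(void)
{
    int len[] = {2, 3}; // number of "union components" for each of two tvars
    int idx[] = {0, 0};
    int n = 2, i = 0;
    while (i < n) {
        printf("try sig (%d, %d)\n", idx[0], idx[1]); // "compile" this combination
        for (i = 0; i < n; i++) {
            if (++idx[i] < len[i])
                break;  // this digit advanced; go try the new combination
            idx[i] = 0; // this digit wrapped; carry into the next one
        }
    }
    return 0; // visits all 2 * 3 = 6 combinations, then i == n
}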
Example #5: jl_typemap_insert, which inserts a new signature entry into a typemap
jl_typemap_entry_t *jl_typemap_insert(union jl_typemap_t *cache, jl_value_t *parent,
                                      jl_tupletype_t *type,
                                      jl_tupletype_t *simpletype, jl_svec_t *guardsigs,
                                      jl_value_t *newvalue, int8_t offs,
                                      const struct jl_typemap_info *tparams,
                                      size_t min_world, size_t max_world,
                                      jl_value_t **overwritten)
{
    jl_ptls_t ptls = jl_get_ptls_states();
    assert(min_world > 0 && max_world > 0);
    if (!simpletype)
        simpletype = (jl_tupletype_t*)jl_nothing;
    jl_value_t *ttype = jl_unwrap_unionall((jl_value_t*)type);

    if ((jl_value_t*)simpletype == jl_nothing) {
        jl_typemap_entry_t *ml = jl_typemap_assoc_by_type(*cache, (jl_value_t*)type, NULL, 0, offs, min_world, 0);
        if (ml && ml->simplesig == (void*)jl_nothing) {
            if (overwritten != NULL)
                *overwritten = ml->func.value;
            if (newvalue == ml->func.value) // no change. TODO: involve world in computation!
                return ml;
            if (newvalue == NULL)  // don't overwrite with guard entries
                return ml;
            ml->max_world = min_world - 1;
        }
    }

    jl_typemap_entry_t *newrec =
        (jl_typemap_entry_t*)jl_gc_alloc(ptls, sizeof(jl_typemap_entry_t),
                                         jl_typemap_entry_type);
    newrec->sig = type;
    newrec->simplesig = simpletype;
    newrec->func.value = newvalue;
    newrec->guardsigs = guardsigs;
    newrec->next = (jl_typemap_entry_t*)jl_nothing;
    newrec->min_world = min_world;
    newrec->max_world = max_world;
    // compute the complexity of this type signature
    newrec->va = jl_is_va_tuple((jl_datatype_t*)ttype);
    newrec->issimplesig = !jl_is_unionall(type); // a TypeVar environment needs a complex matching test
    newrec->isleafsig = newrec->issimplesig && !newrec->va; // entirely leaf types don't need to be sorted
    JL_GC_PUSH1(&newrec);
    assert(jl_is_tuple_type(ttype));
    size_t i, l;
    for (i = 0, l = jl_field_count(ttype); i < l && newrec->issimplesig; i++) {
        jl_value_t *decl = jl_field_type(ttype, i);
        if (jl_is_kind(decl))
            newrec->isleafsig = 0; // Type{} may have a higher priority than a kind
        else if (jl_is_type_type(decl))
            newrec->isleafsig = 0; // Type{} may need special processing to compute the match
        else if (jl_is_vararg_type(decl))
            newrec->isleafsig = 0; // makes iteration easier when the endpoints are the same
        else if (decl == (jl_value_t*)jl_any_type)
            newrec->isleafsig = 0; // Any needs to go in the general cache
        else if (!jl_is_concrete_type(decl)) // anything else needs to go through the general subtyping test
            newrec->isleafsig = newrec->issimplesig = 0;
    }
    // TODO: assert that guardsigs == jl_emptysvec && simplesig == jl_nothing if isleafsig and optimize with that knowledge?
    jl_typemap_insert_generic(cache, parent, newrec, NULL, offs, tparams);
    JL_GC_POP();
    return newrec;
}
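The flag computation above is the part worth internalizing: issimplesig means every argument can be matched without a full subtyping test, and isleafsig additionally excludes kinds, Type{}, varargs, and Any so the entry can live in the fast leaf cache. A toy restatement of that classification with an enum standing in for real Julia types (hypothetical, for illustration only):

#include <stdio.h>

enum declkind { DECL_CONCRETE, DECL_KIND, DECL_TYPETYPE, DECL_VARARG, DECL_ANY, DECL_ABSTRACT };

static void classify(const enum declkind *decls, int n, int is_unionall, int is_va)
{
    int issimplesig = !is_unionall; // a TypeVar environment needs a complex matching test
    int isleafsig = issimplesig && !is_va;
    for (int i = 0; i < n && issimplesig; i++) {
        switch (decls[i]) {
        case DECL_KIND:     // may rank below a Type{} entry
        case DECL_TYPETYPE: // needs special processing to match
        case DECL_VARARG:   // endpoints differ from a plain leaf entry
        case DECL_ANY:      // belongs in the general cache
            isleafsig = 0;
            break;
        case DECL_ABSTRACT: // requires the general subtyping test
            isleafsig = issimplesig = 0;
            break;
        default:            // concrete: keeps both flags set
            break;
        }
    }
    printf("isleafsig=%d issimplesig=%d\n", isleafsig, issimplesig);
}

int main(void)
{
    enum declkind sig1[] = { DECL_CONCRETE, DECL_CONCRETE };
    enum declkind sig2[] = { DECL_CONCRETE, DECL_ABSTRACT };
    classify(sig1, 2, 0, 0); // -> isleafsig=1 issimplesig=1
    classify(sig2, 2, 0, 0); // -> isleafsig=0 issimplesig=0
    return 0;
}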