Example #1
static void show_type(jl_value_t *st, jl_value_t *t)
{
    ios_t *s = (ios_t*)jl_iostr_data(st);
    if (jl_is_union_type(t)) {
        if (t == (jl_value_t*)jl_bottom_type) {
            JL_WRITE(s, "None", 4);
        }
        else if (t == jl_top_type) {
            JL_WRITE(s, "Top", 3);
        }
        else {
            JL_WRITE(s, "Union", 5);
            jl_show_tuple(st, ((jl_uniontype_t*)t)->types, '(', ')', 0);
        }
    }
    else if (jl_is_seq_type(t)) {
        jl_show(st, jl_tparam0(t));
        JL_WRITE(s, "...", 3);
    }
    else if (jl_is_typector(t)) {
        jl_show(st, (jl_value_t*)((jl_typector_t*)t)->body);
    }
    else {
        assert(jl_is_some_tag_type(t));
        jl_tag_type_t *tt = (jl_tag_type_t*)t;
        JL_PUTS(tt->name->name->name, s);
        jl_tuple_t *p = tt->parameters;
        if (jl_tuple_len(p) > 0)
            jl_show_tuple(st, p, '{', '}', 0);
    }
}
Example #2
File: gf.c Project: cshen/julia
static jl_value_t *nth_slot_type(jl_tuple_t *sig, size_t i)
{
    size_t len = sig->length;
    if (len == 0)
        return NULL;
    if (i < len-1)
        return jl_tupleref(sig, i);
    if (jl_is_seq_type(jl_tupleref(sig,len-1))) {
        return jl_tparam0(jl_tupleref(sig,len-1));
    }
    if (i == len-1)
        return jl_tupleref(sig, i);
    return NULL;
}
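The helper above maps an argument index to the declared type of that slot, expanding a trailing T... element so that every index at or past the vararg position resolves to T. Below is a minimal usage sketch under the same early-Julia C API used in these examples; the tuple construction is illustrative only, not taken from gf.c:

// Illustrative sketch: build a signature equivalent to (Int32, Any...) with the
// tuple helpers seen elsewhere in these examples, then query slot types.
jl_tuple_t *sig = jl_alloc_tuple(2);
jl_tupleset(sig, 0, (jl_value_t*)jl_int32_type);
jl_tupleset(sig, 1, jl_apply_type((jl_value_t*)jl_seq_type,
                                  jl_tuple1((jl_value_t*)jl_any_type)));
jl_value_t *s0 = nth_slot_type(sig, 0); // Int32: index before the vararg slot
jl_value_t *s3 = nth_slot_type(sig, 3); // Any: element type of the trailing Any...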
Example #3
static void show_type(jl_value_t *t)
{
    ios_t *s = jl_current_output_stream();
    if (jl_is_func_type(t)) {
        if (t == (jl_value_t*)jl_any_func) {
            ios_puts("Function", s);
        }
        else {
            jl_show((jl_value_t*)((jl_func_type_t*)t)->from);
            ios_write(s, "-->", 3);
            jl_show((jl_value_t*)((jl_func_type_t*)t)->to);
        }
    }
    else if (t == (jl_value_t*)jl_function_type) {
        ios_puts("Function", s);
    }
    else if (jl_is_union_type(t)) {
        if (t == (jl_value_t*)jl_bottom_type) {
            ios_write(s, "None", 4);
        }
        else if (t == jl_top_type) {
            ios_write(s, "Top", 3);
        }
        else {
            ios_write(s, "Union", 5);
            show_tuple(((jl_uniontype_t*)t)->types, '(', ')', 0);
        }
    }
    else if (jl_is_seq_type(t)) {
        jl_show(jl_tparam0(t));
        ios_write(s, "...", 3);
    }
    else if (jl_is_typector(t)) {
        jl_show((jl_value_t*)((jl_typector_t*)t)->body);
    }
    else {
        assert(jl_is_some_tag_type(t));
        jl_tag_type_t *tt = (jl_tag_type_t*)t;
        ios_puts(tt->name->name->name, s);
        jl_tuple_t *p = tt->parameters;
        if (p->length > 0)
            show_tuple(p, '{', '}', 0);
    }
}
Example #4
// ccall(pointer, rettype, (argtypes...), args...)
static Value *emit_ccall(jl_value_t **args, size_t nargs, jl_codectx_t *ctx)
{
    JL_NARGSV(ccall, 3);
    jl_value_t *ptr=NULL, *rt=NULL, *at=NULL;
    Value *jl_ptr=NULL;
    JL_GC_PUSH(&ptr, &rt, &at);
    ptr = static_eval(args[1], ctx, true);
    if (ptr == NULL) {
        jl_value_t *ptr_ty = expr_type(args[1], ctx);
        Value *arg1 = emit_unboxed(args[1], ctx);
        if (!jl_is_cpointer_type(ptr_ty)) {
            emit_typecheck(arg1, (jl_value_t*)jl_voidpointer_type,
                           "ccall: function argument not a pointer or valid constant", ctx);
        }
        jl_ptr = emit_unbox(T_size, T_psize, arg1);
    }
    rt  = jl_interpret_toplevel_expr_in(ctx->module, args[2],
                                        &jl_tupleref(ctx->sp,0),
                                        jl_tuple_len(ctx->sp)/2);
    if (jl_is_tuple(rt)) {
        std::string msg = "in " + ctx->funcName +
            ": ccall: missing return type";
        jl_error(msg.c_str());
    }
    at  = jl_interpret_toplevel_expr_in(ctx->module, args[3],
                                        &jl_tupleref(ctx->sp,0),
                                        jl_tuple_len(ctx->sp)/2);
    void *fptr=NULL;
    char *f_name=NULL, *f_lib=NULL;
    if (ptr != NULL) {
        if (jl_is_tuple(ptr) && jl_tuple_len(ptr)==1) {
            ptr = jl_tupleref(ptr,0);
        }
        if (jl_is_symbol(ptr))
            f_name = ((jl_sym_t*)ptr)->name;
        else if (jl_is_byte_string(ptr))
            f_name = jl_string_data(ptr);
        if (f_name != NULL) {
            // just symbol, default to JuliaDLHandle
#ifdef __WIN32__
            fptr = jl_dlsym_e(jl_dl_handle, f_name);
            if (!fptr) {
                //TODO: when one of these succeeds, store the f_lib name (and clear fptr)
                fptr = jl_dlsym_e(jl_kernel32_handle, f_name);
                if (!fptr) {
                    fptr = jl_dlsym_e(jl_ntdll_handle, f_name);
                    if (!fptr) {
                        fptr = jl_dlsym_e(jl_crtdll_handle, f_name);
                        if (!fptr) {
                            fptr = jl_dlsym(jl_winsock_handle, f_name);
                        }
                    }
                }
            }
            else {
                // available in process symbol table
                fptr = NULL;
            }
#else
            // will look in process symbol table
#endif
        }
        else if (jl_is_cpointer_type(jl_typeof(ptr))) {
            fptr = *(void**)jl_bits_data(ptr);
        }
        else if (jl_is_tuple(ptr) && jl_tuple_len(ptr)>1) {
            jl_value_t *t0 = jl_tupleref(ptr,0);
            jl_value_t *t1 = jl_tupleref(ptr,1);
            if (jl_is_symbol(t0))
                f_name = ((jl_sym_t*)t0)->name;
            else if (jl_is_byte_string(t0))
                f_name = jl_string_data(t0);
            else
                JL_TYPECHK(ccall, symbol, t0);
            if (jl_is_symbol(t1))
                f_lib = ((jl_sym_t*)t1)->name;
            else if (jl_is_byte_string(t1))
                f_lib = jl_string_data(t1);
            else
                JL_TYPECHK(ccall, symbol, t1);
        }
        else {
            JL_TYPECHK(ccall, pointer, ptr);
        }
    }
    if (f_name == NULL && fptr == NULL && jl_ptr == NULL) {
        JL_GC_POP();
        emit_error("ccall: null function pointer", ctx);
        return literal_pointer_val(jl_nothing);
    }

    JL_TYPECHK(ccall, type, rt);
    JL_TYPECHK(ccall, tuple, at);
    JL_TYPECHK(ccall, type, at);
    jl_tuple_t *tt = (jl_tuple_t*)at;
    std::vector<Type *> fargt(0);
    std::vector<Type *> fargt_sig(0);
    Type *lrt = julia_type_to_llvm(rt);
    if (lrt == NULL) {
        JL_GC_POP();
        return literal_pointer_val(jl_nothing);
    }
    size_t i;
    bool haspointers = false;
    bool isVa = false;
    size_t nargt = jl_tuple_len(tt);
    std::vector<AttributeWithIndex> attrs;

    for(i=0; i < nargt; i++) {
        jl_value_t *tti = jl_tupleref(tt,i);
        if (jl_is_seq_type(tti)) {
            isVa = true;
            tti = jl_tparam0(tti);
        }
        if (jl_is_bits_type(tti)) {
            // see pull req #978. need to annotate signext/zeroext for
            // small integer arguments.
            jl_bits_type_t *bt = (jl_bits_type_t*)tti;
            if (bt->nbits < 32) {
                if (jl_signed_type == NULL) {
                    jl_signed_type = jl_get_global(jl_core_module,jl_symbol("Signed"));
                }
#ifdef LLVM32
                Attributes::AttrVal av;
                if (jl_signed_type && jl_subtype(tti, jl_signed_type, 0))
                    av = Attributes::SExt;
                else
                    av = Attributes::ZExt;
                attrs.push_back(AttributeWithIndex::get(getGlobalContext(), i+1,
                                                        ArrayRef<Attributes::AttrVal>(&av, 1)));
#else
                Attribute::AttrConst av;
                if (jl_signed_type && jl_subtype(tti, jl_signed_type, 0))
                    av = Attribute::SExt;
                else
                    av = Attribute::ZExt;
                attrs.push_back(AttributeWithIndex::get(i+1, av));
#endif
            }
        }
        Type *t = julia_type_to_llvm(tti);
        if (t == NULL) {
            JL_GC_POP();
            return literal_pointer_val(jl_nothing);
        }
        fargt.push_back(t);
        if (!isVa)
            fargt_sig.push_back(t);
    }
    // check for calling convention specifier
    CallingConv::ID cc = CallingConv::C;
    jl_value_t *last = args[nargs];
    if (jl_is_expr(last)) {
        jl_sym_t *lhd = ((jl_expr_t*)last)->head;
        if (lhd == jl_symbol("stdcall")) {
            cc = CallingConv::X86_StdCall;
            nargs--;
        }
        else if (lhd == jl_symbol("cdecl")) {
            cc = CallingConv::C;
            nargs--;
        }
        else if (lhd == jl_symbol("fastcall")) {
            cc = CallingConv::X86_FastCall;
            nargs--;
        }
        else if (lhd == jl_symbol("thiscall")) {
            cc = CallingConv::X86_ThisCall;
            nargs--;
        }
    }
    
    if ((!isVa && jl_tuple_len(tt)  != (nargs-2)/2) ||
        ( isVa && jl_tuple_len(tt)-1 > (nargs-2)/2))
        jl_error("ccall: wrong number of arguments to C function");

    // some special functions
    if (fptr == &jl_array_ptr) {
        Value *ary = emit_expr(args[4], ctx);
        JL_GC_POP();
        return mark_julia_type(builder.CreateBitCast(emit_arrayptr(ary),lrt),
                               rt);
    }

    // see if there are & arguments
    for(i=4; i < nargs+1; i+=2) {
        jl_value_t *argi = args[i];
        if (jl_is_expr(argi) && ((jl_expr_t*)argi)->head == amp_sym) {
            haspointers = true;
            break;
        }
    }

    // make LLVM function object for the target
    Value *llvmf;
    FunctionType *functype = FunctionType::get(lrt, fargt_sig, isVa);
    
    if (jl_ptr != NULL) {
        null_pointer_check(jl_ptr,ctx);
        Type *funcptype = PointerType::get(functype,0);
        llvmf = builder.CreateIntToPtr(jl_ptr, funcptype);
    } else if (fptr != NULL) {
        Type *funcptype = PointerType::get(functype,0);
        llvmf = literal_pointer_val(fptr, funcptype);
    }
    else {
        void *symaddr;
        if (f_lib != NULL)
            symaddr = add_library_sym(f_name, f_lib);
        else
            symaddr = sys::DynamicLibrary::SearchForAddressOfSymbol(f_name);
        if (symaddr == NULL) {
            JL_GC_POP();
            std::stringstream msg;
            msg << "ccall: could not find function ";
            msg << f_name;
            if (f_lib != NULL) {
                msg << " in library ";
                msg << f_lib;
            }
            emit_error(msg.str(), ctx);
            return literal_pointer_val(jl_nothing);
        }
        llvmf = jl_Module->getOrInsertFunction(f_name, functype);
    }

    // save temp argument area stack pointer
    Value *saveloc=NULL;
    Value *stacksave=NULL;
    if (haspointers) {
        // TODO: inline this
        saveloc = builder.CreateCall(save_arg_area_loc_func);
        stacksave =
            builder.CreateCall(Intrinsic::getDeclaration(jl_Module,
                                                         Intrinsic::stacksave));
    }

    // emit arguments
    Value *argvals[(nargs-3)/2];
    int last_depth = ctx->argDepth;
    int nargty = jl_tuple_len(tt);
    for(i=4; i < nargs+1; i+=2) {
        int ai = (i-4)/2;
        jl_value_t *argi = args[i];
        bool addressOf = false;
        if (jl_is_expr(argi) && ((jl_expr_t*)argi)->head == amp_sym) {
            addressOf = true;
            argi = jl_exprarg(argi,0);
        }
        Type *largty;
        jl_value_t *jargty;
        if (isVa && ai >= nargty-1) {
            largty = fargt[nargty-1];
            jargty = jl_tparam0(jl_tupleref(tt,nargty-1));
        }
        else {
            largty = fargt[ai];
            jargty = jl_tupleref(tt,ai);
        }
        Value *arg;
        if (largty == jl_pvalue_llvmt) {
            arg = emit_expr(argi, ctx, true);
        }
        else {
            arg = emit_unboxed(argi, ctx);
            if (jl_is_bits_type(expr_type(argi, ctx))) {
                if (addressOf)
                    arg = emit_unbox(largty->getContainedType(0), largty, arg);
                else
                    arg = emit_unbox(largty, PointerType::get(largty,0), arg);
            }
        }
        /*
#ifdef JL_GC_MARKSWEEP
        // make sure args are rooted
        if (largty->isPointerTy() &&
            (largty == jl_pvalue_llvmt ||
             !jl_is_bits_type(expr_type(args[i], ctx)))) {
            make_gcroot(boxed(arg), ctx);
        }
#endif
        */
        argvals[ai] = julia_to_native(largty, jargty, arg, argi, addressOf,
                                      ai+1, ctx);
    }
    // the actual call
    Value *result = builder.CreateCall(llvmf,
                                       ArrayRef<Value*>(&argvals[0],(nargs-3)/2));
    if (cc != CallingConv::C)
        ((CallInst*)result)->setCallingConv(cc);

#ifdef LLVM32
    ((CallInst*)result)->setAttributes(AttrListPtr::get(getGlobalContext(), ArrayRef<AttributeWithIndex>(attrs)));
#else
    ((CallInst*)result)->setAttributes(AttrListPtr::get(attrs.data(),attrs.size()));
#endif
    // restore temp argument area stack pointer
    if (haspointers) {
        assert(saveloc != NULL);
        builder.CreateCall(restore_arg_area_loc_func, saveloc);
        assert(stacksave != NULL);
        builder.CreateCall(Intrinsic::getDeclaration(jl_Module,
                                                     Intrinsic::stackrestore),
                           stacksave);
    }
    ctx->argDepth = last_depth;
    if (0) { // Enable this to turn on SSPREQ (-fstack-protector) on the function containing this ccall
#ifdef LLVM32        
        ctx->f->addFnAttr(Attributes::StackProtectReq);
#else
        ctx->f->addFnAttr(Attribute::StackProtectReq);
#endif
    }

    JL_GC_POP();
    if (lrt == T_void)
        return literal_pointer_val((jl_value_t*)jl_nothing);
    return mark_julia_type(result, rt);
}
Example #5
// ccall(pointer, rettype, (argtypes...), args...)
static Value *emit_ccall(jl_value_t **args, size_t nargs, jl_codectx_t *ctx)
{
    JL_NARGSV(ccall, 3);
    jl_value_t *ptr=NULL, *rt=NULL, *at=NULL;
    JL_GC_PUSH(&ptr, &rt, &at);
    ptr = jl_interpret_toplevel_expr_in(ctx->module, args[1],
                                        &jl_tupleref(ctx->sp,0),
                                        ctx->sp->length/2);
    rt  = jl_interpret_toplevel_expr_in(ctx->module, args[2],
                                        &jl_tupleref(ctx->sp,0),
                                        ctx->sp->length/2);
    if (jl_is_tuple(rt)) {
        std::string msg = "in " + ctx->funcName +
            ": ccall: missing return type";
        jl_error(msg.c_str());
    }
    at  = jl_interpret_toplevel_expr_in(ctx->module, args[3],
                                        &jl_tupleref(ctx->sp,0),
                                        ctx->sp->length/2);
    void *fptr;
    if (jl_is_symbol(ptr)) {
        // just symbol, default to JuliaDLHandle
        fptr = jl_dlsym(jl_dl_handle, ((jl_sym_t*)ptr)->name);
    }
    else {
        JL_TYPECHK(ccall, pointer, ptr);
        fptr = *(void**)jl_bits_data(ptr);
    }
    JL_TYPECHK(ccall, type, rt);
    JL_TYPECHK(ccall, tuple, at);
    JL_TYPECHK(ccall, type, at);
    jl_tuple_t *tt = (jl_tuple_t*)at;
    std::vector<Type *> fargt(0);
    std::vector<Type *> fargt_sig(0);
    Type *lrt = julia_type_to_llvm(rt, ctx);
    if (lrt == NULL) {
        JL_GC_POP();
        return literal_pointer_val(jl_nothing);
    }
    size_t i;
    bool haspointers = false;
    bool isVa = false;
    for(i=0; i < tt->length; i++) {
        jl_value_t *tti = jl_tupleref(tt,i);
        if (jl_is_seq_type(tti)) {
            isVa = true;
            tti = jl_tparam0(tti);
        }
        Type *t = julia_type_to_llvm(tti, ctx);
        if (t == NULL) {
            JL_GC_POP();
            return literal_pointer_val(jl_nothing);
        }
        fargt.push_back(t);
        if (!isVa)
            fargt_sig.push_back(t);
    }
    if ((!isVa && tt->length  != (nargs-2)/2) ||
        ( isVa && tt->length-1 > (nargs-2)/2))
        jl_error("ccall: wrong number of arguments to C function");

    // some special functions
    if (fptr == &jl_array_ptr) {
        Value *ary = emit_expr(args[4], ctx, true);
        JL_GC_POP();
        return mark_julia_type(builder.CreateBitCast(emit_arrayptr(ary),T_pint8),
                               rt);
    }

    // see if there are & arguments
    for(i=4; i < nargs+1; i+=2) {
        jl_value_t *argi = args[i];
        if (jl_is_expr(argi) && ((jl_expr_t*)argi)->head == amp_sym) {
            haspointers = true;
            break;
        }
    }

    // make LLVM function object for the target
    Function *llvmf =
        Function::Create(FunctionType::get(lrt, fargt_sig, isVa),
                         Function::ExternalLinkage,
                         "ccall_", jl_Module);
    jl_ExecutionEngine->addGlobalMapping(llvmf, fptr);

    // save temp argument area stack pointer
    Value *saveloc=NULL;
    Value *stacksave=NULL;
    if (haspointers) {
        // TODO: inline this
        saveloc = builder.CreateCall(save_arg_area_loc_func);
        stacksave =
            builder.CreateCall(Intrinsic::getDeclaration(jl_Module,
                                                         Intrinsic::stacksave));
    }

    // emit arguments
    Value *argvals[(nargs-3)/2];
    int last_depth = ctx->argDepth;
    int nargty = tt->length;
    for(i=4; i < nargs+1; i+=2) {
        int ai = (i-4)/2;
        jl_value_t *argi = args[i];
        bool addressOf = false;
        if (jl_is_expr(argi) && ((jl_expr_t*)argi)->head == amp_sym) {
            addressOf = true;
            argi = jl_exprarg(argi,0);
        }
        Value *arg = emit_expr(argi, ctx, true);
        Type *largty;
        jl_value_t *jargty;
        if (isVa && ai >= nargty-1) {
            largty = fargt[nargty-1];
            jargty = jl_tparam0(jl_tupleref(tt,nargty-1));
        }
        else {
            largty = fargt[ai];
            jargty = jl_tupleref(tt,ai);
        }
        /*
#ifdef JL_GC_MARKSWEEP
        // make sure args are rooted
        if (largty->isPointerTy() &&
            (largty == jl_pvalue_llvmt ||
             !jl_is_bits_type(expr_type(args[i], ctx)))) {
            make_gcroot(boxed(arg), ctx);
        }
#endif
        */
        argvals[ai] = julia_to_native(largty, jargty, arg, argi, addressOf,
                                      ai+1, ctx);
    }
    // the actual call
    Value *result = builder.CreateCall(llvmf,
                                       ArrayRef<Value*>(&argvals[0],(nargs-3)/2));

    // restore temp argument area stack pointer
    if (haspointers) {
        assert(saveloc != NULL);
        builder.CreateCall(restore_arg_area_loc_func, saveloc);
        assert(stacksave != NULL);
        builder.CreateCall(Intrinsic::getDeclaration(jl_Module,
                                                     Intrinsic::stackrestore),
                           stacksave);
    }
    ctx->argDepth = last_depth;

    JL_GC_POP();
    if (lrt == T_void)
        return literal_pointer_val((jl_value_t*)jl_nothing);
    return mark_julia_type(result, rt);
}
Example #6
// ccall(pointer, rettype, (argtypes...), args...)
static Value *emit_ccall(jl_value_t **args, size_t nargs, jl_codectx_t *ctx)
{
    JL_NARGSV(ccall, 3);
    jl_value_t *ptr=NULL, *rt=NULL, *at=NULL;
    JL_GC_PUSH(&ptr, &rt, &at);
    ptr = jl_interpret_toplevel_expr_in(ctx->module, args[1],
                                        &jl_tupleref(ctx->sp,0),
                                        jl_tuple_len(ctx->sp)/2);
    rt  = jl_interpret_toplevel_expr_in(ctx->module, args[2],
                                        &jl_tupleref(ctx->sp,0),
                                        jl_tuple_len(ctx->sp)/2);
    if (jl_is_tuple(rt)) {
        std::string msg = "in " + ctx->funcName +
            ": ccall: missing return type";
        jl_error(msg.c_str());
    }
    at  = jl_interpret_toplevel_expr_in(ctx->module, args[3],
                                        &jl_tupleref(ctx->sp,0),
                                        jl_tuple_len(ctx->sp)/2);
    void *fptr=NULL;
    char *f_name=NULL, *f_lib=NULL;
    if (jl_is_tuple(ptr) && jl_tuple_len(ptr)==1) {
        ptr = jl_tupleref(ptr,0);
    }
    if (jl_is_symbol(ptr))
        f_name = ((jl_sym_t*)ptr)->name;
    else if (jl_is_byte_string(ptr))
        f_name = jl_string_data(ptr);
    if (f_name != NULL) {
        // just symbol, default to JuliaDLHandle
#ifdef __WIN32__
        fptr = jl_dlsym_e(jl_dl_handle, f_name);
        if (!fptr) {
            fptr = jl_dlsym_e(jl_kernel32_handle, f_name);
            if (!fptr) {
                fptr = jl_dlsym_e(jl_ntdll_handle, f_name);
                if (!fptr) {
                    fptr = jl_dlsym_e(jl_crtdll_handle, f_name);
                    if (!fptr) {
                        fptr = jl_dlsym(jl_winsock_handle, f_name);
                    }
                }
            }
        }
        else {
            // available in process symbol table
            fptr = NULL;
        }
#else
        // will look in process symbol table
#endif
    }
    else if (jl_is_cpointer_type(jl_typeof(ptr))) {
        fptr = *(void**)jl_bits_data(ptr);
    }
    else if (jl_is_tuple(ptr) && jl_tuple_len(ptr)>1) {
        jl_value_t *t0 = jl_tupleref(ptr,0);
        jl_value_t *t1 = jl_tupleref(ptr,1);
        if (jl_is_symbol(t0))
            f_name = ((jl_sym_t*)t0)->name;
        else if (jl_is_byte_string(t0))
            f_name = jl_string_data(t0);
        else
            JL_TYPECHK(ccall, symbol, t0);
        if (jl_is_symbol(t1))
            f_lib = ((jl_sym_t*)t1)->name;
        else if (jl_is_byte_string(t1))
            f_lib = jl_string_data(t1);
        else
            JL_TYPECHK(ccall, symbol, t1);
    }
    else {
        JL_TYPECHK(ccall, pointer, ptr);
    }
    if (f_name == NULL && fptr == NULL) {
        JL_GC_POP();
        emit_error("ccall: null function pointer", ctx);
        return literal_pointer_val(jl_nothing);
    }
    JL_TYPECHK(ccall, type, rt);
    JL_TYPECHK(ccall, tuple, at);
    JL_TYPECHK(ccall, type, at);
    jl_tuple_t *tt = (jl_tuple_t*)at;
    std::vector<Type *> fargt(0);
    std::vector<Type *> fargt_sig(0);
    Type *lrt = julia_type_to_llvm(rt);
    if (lrt == NULL) {
        JL_GC_POP();
        return literal_pointer_val(jl_nothing);
    }
    size_t i;
    bool haspointers = false;
    bool isVa = false;
    for(i=0; i < jl_tuple_len(tt); i++) {
        jl_value_t *tti = jl_tupleref(tt,i);
        if (jl_is_seq_type(tti)) {
            isVa = true;
            tti = jl_tparam0(tti);
        }
        Type *t = julia_type_to_llvm(tti);
        if (t == NULL) {
            JL_GC_POP();
            return literal_pointer_val(jl_nothing);
        }
        fargt.push_back(t);
        if (!isVa)
            fargt_sig.push_back(t);
    }
    // check for calling convention specifier
    CallingConv::ID cc = CallingConv::C;
    jl_value_t *last = args[nargs];
    if (jl_is_expr(last)) {
        jl_sym_t *lhd = ((jl_expr_t*)last)->head;
        if (lhd == jl_symbol("stdcall")) {
            cc = CallingConv::X86_StdCall;
            nargs--;
        }
        else if (lhd == jl_symbol("cdecl")) {
            cc = CallingConv::C;
            nargs--;
        }
        else if (lhd == jl_symbol("fastcall")) {
            cc = CallingConv::X86_FastCall;
            nargs--;
        }
    }
    
    if ((!isVa && jl_tuple_len(tt)  != (nargs-2)/2) ||
        ( isVa && jl_tuple_len(tt)-1 > (nargs-2)/2))
        jl_error("ccall: wrong number of arguments to C function");

    // some special functions
    if (fptr == &jl_array_ptr) {
        Value *ary = emit_expr(args[4], ctx);
        JL_GC_POP();
        return mark_julia_type(builder.CreateBitCast(emit_arrayptr(ary),lrt),
                               rt);
    }

    // see if there are & arguments
    for(i=4; i < nargs+1; i+=2) {
        jl_value_t *argi = args[i];
        if (jl_is_expr(argi) && ((jl_expr_t*)argi)->head == amp_sym) {
            haspointers = true;
            break;
        }
    }

    // make LLVM function object for the target
    Constant *llvmf;
    FunctionType *functype = FunctionType::get(lrt, fargt_sig, isVa);
    
    if (fptr != NULL) {
        Type *funcptype = PointerType::get(functype,0);
        llvmf = ConstantExpr::getIntToPtr( 
            ConstantInt::get(funcptype, (uint64_t)fptr), 
            funcptype);
    }
    else {
        if (f_lib != NULL)
            add_library_sym(f_name, f_lib);
        llvmf = jl_Module->getOrInsertFunction(f_name, functype);
    }

    // save temp argument area stack pointer
    Value *saveloc=NULL;
    Value *stacksave=NULL;
    if (haspointers) {
        // TODO: inline this
        saveloc = builder.CreateCall(save_arg_area_loc_func);
        stacksave =
            builder.CreateCall(Intrinsic::getDeclaration(jl_Module,
                                                         Intrinsic::stacksave));
    }

    // emit arguments
    Value *argvals[(nargs-3)/2];
    int last_depth = ctx->argDepth;
    int nargty = jl_tuple_len(tt);
    for(i=4; i < nargs+1; i+=2) {
        int ai = (i-4)/2;
        jl_value_t *argi = args[i];
        bool addressOf = false;
        if (jl_is_expr(argi) && ((jl_expr_t*)argi)->head == amp_sym) {
            addressOf = true;
            argi = jl_exprarg(argi,0);
        }
        Type *largty;
        jl_value_t *jargty;
        if (isVa && ai >= nargty-1) {
            largty = fargt[nargty-1];
            jargty = jl_tparam0(jl_tupleref(tt,nargty-1));
        }
        else {
            largty = fargt[ai];
            jargty = jl_tupleref(tt,ai);
        }
        Value *arg;
        if (largty == jl_pvalue_llvmt) {
            arg = emit_expr(argi, ctx, true);
        }
        else {
            arg = emit_unboxed(argi, ctx);
            if (jl_is_bits_type(expr_type(argi, ctx))) {
                if (addressOf)
                    arg = emit_unbox(largty->getContainedType(0), largty, arg);
                else
                    arg = emit_unbox(largty, PointerType::get(largty,0), arg);
            }
        }
        /*
#ifdef JL_GC_MARKSWEEP
        // make sure args are rooted
        if (largty->isPointerTy() &&
            (largty == jl_pvalue_llvmt ||
             !jl_is_bits_type(expr_type(args[i], ctx)))) {
            make_gcroot(boxed(arg), ctx);
        }
#endif
        */
        argvals[ai] = julia_to_native(largty, jargty, arg, argi, addressOf,
                                      ai+1, ctx);
    }
    // the actual call
    Value *result = builder.CreateCall(llvmf,
                                       ArrayRef<Value*>(&argvals[0],(nargs-3)/2));
    if (cc != CallingConv::C)
        ((CallInst*)result)->setCallingConv(cc);

    // restore temp argument area stack pointer
    if (haspointers) {
        assert(saveloc != NULL);
        builder.CreateCall(restore_arg_area_loc_func, saveloc);
        assert(stacksave != NULL);
        builder.CreateCall(Intrinsic::getDeclaration(jl_Module,
                                                     Intrinsic::stackrestore),
                           stacksave);
    }
    ctx->argDepth = last_depth;
    if (0) { // Enable this to turn on SSPREQ (-fstack-protector) on the function containing this ccall
        ctx->f->addFnAttr(Attribute::StackProtectReq);
    }

    JL_GC_POP();
    if (lrt == T_void)
        return literal_pointer_val((jl_value_t*)jl_nothing);
    return mark_julia_type(result, rt);
}
Example #7
File: gf.c Project: cshen/julia
static int is_va_tuple(jl_tuple_t *t)
{
    return (t->length>0 && jl_is_seq_type(jl_tupleref(t,t->length-1)));
}
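As a quick illustration of the predicate above (hypothetical signatures, not from gf.c): a tuple equivalent to (Int32, Any...) yields 1, since its final element is a sequence type, while (Int32, Any) yields 0.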
Example #8
File: gf.c Project: cshen/julia
static jl_function_t *cache_method(jl_methtable_t *mt, jl_tuple_t *type,
                                   jl_function_t *method, jl_tuple_t *decl,
                                   jl_tuple_t *sparams)
{
    size_t i;
    int need_dummy_entries = 0;
    jl_value_t *temp=NULL;
    jl_function_t *newmeth=NULL;
    JL_GC_PUSH(&type, &temp, &newmeth);

    for (i=0; i < type->length; i++) {
        jl_value_t *elt = jl_tupleref(type,i);
        int set_to_any = 0;
        if (nth_slot_type(decl,i) == jl_ANY_flag) {
            // don't specialize on slots marked ANY
            temp = jl_tupleref(type, i);
            jl_tupleset(type, i, (jl_value_t*)jl_any_type);
            int nintr=0;
            jl_methlist_t *curr = mt->defs;
            // if this method is the only match even with the current slot
            // set to Any, then it is safe to cache it that way.
            while (curr != NULL && curr->func!=method) {
                if (jl_type_intersection((jl_value_t*)curr->sig,
                                         (jl_value_t*)type) !=
                    (jl_value_t*)jl_bottom_type) {
                    nintr++;
                    break;
                }
                curr = curr->next;
            }
            if (nintr) {
                // TODO: even if different specializations of this slot need
                // separate cache entries, have them share code.
                jl_tupleset(type, i, temp);
            }
            else {
                set_to_any = 1;
            }
        }
        if (set_to_any) {
        }
        else if (jl_is_tuple(elt)) {
            /*
              don't cache tuple type exactly; just remember that it was
              a tuple, unless the declaration asks for something more
              specific. determined with a type intersection.
            */
            int might_need_dummy=0;
            temp = jl_tupleref(type, i);
            if (i < decl->length) {
                jl_value_t *declt = jl_tupleref(decl,i);
                // for T..., intersect with T
                if (jl_is_seq_type(declt))
                    declt = jl_tparam0(declt);
                if (declt == (jl_value_t*)jl_tuple_type ||
                    jl_subtype((jl_value_t*)jl_tuple_type, declt, 0)) {
                    // don't specialize args that matched (Any...) or Any
                    jl_tupleset(type, i, (jl_value_t*)jl_tuple_type);
                    might_need_dummy = 1;
                }
                else {
                    declt = jl_type_intersection(declt,
                                                 (jl_value_t*)jl_tuple_type);
                    if (((jl_tuple_t*)elt)->length > 3 ||
                        tuple_all_Any((jl_tuple_t*)declt)) {
                        jl_tupleset(type, i, declt);
                        might_need_dummy = 1;
                    }
                }
            }
            else {
                jl_tupleset(type, i, (jl_value_t*)jl_tuple_type);
                might_need_dummy = 1;
            }
            assert(jl_tupleref(type,i) != (jl_value_t*)jl_bottom_type);
            if (might_need_dummy) {
                jl_methlist_t *curr = mt->defs;
                // can't generalize type if there's an overlapping definition
                // with typevars
                while (curr != NULL && curr->func!=method) {
                    if (curr->tvars!=jl_null &&
                        jl_type_intersection((jl_value_t*)curr->sig,
                                             (jl_value_t*)type) !=
                        (jl_value_t*)jl_bottom_type) {
                        jl_tupleset(type, i, temp);
                        might_need_dummy = 0;
                        break;
                    }
                    curr = curr->next;
                }
            }
            if (might_need_dummy) {
                jl_methlist_t *curr = mt->defs;
                while (curr != NULL && curr->func!=method) {
                    jl_tuple_t *sig = curr->sig;
                    if (sig->length > i &&
                        jl_is_tuple(jl_tupleref(sig,i))) {
                        need_dummy_entries = 1;
                        break;
                    }
                    curr = curr->next;
                }
            }
        }
        else if (jl_is_type_type(elt) && jl_is_type_type(jl_tparam0(elt))) {
            /*
              actual argument was Type{...}, we computed its type as
              Type{Type{...}}. we must avoid unbounded nesting here, so
              cache the signature as Type{T}, unless something more
              specific like Type{Type{Int32}} was actually declared.
              this can be determined using a type intersection.
            */
            if (i < decl->length) {
                jl_value_t *declt = jl_tupleref(decl,i);
                // for T..., intersect with T
                if (jl_is_seq_type(declt))
                    declt = jl_tparam0(declt);
                jl_tupleset(type, i,
                            jl_type_intersection(declt, (jl_value_t*)jl_typetype_type));
            }
            else {
                jl_tupleset(type, i, (jl_value_t*)jl_typetype_type);
            }
            assert(jl_tupleref(type,i) != (jl_value_t*)jl_bottom_type);
        }
        else if (jl_is_type_type(elt) &&
                 very_general_type(nth_slot_type(decl,i))) {
            /*
              here's a fairly complex heuristic: if this argument slot's
              declared type is Any, and no definition overlaps with Type
              for this slot, then don't specialize for every Type that
              might be passed.
              Since every type x has its own type Type{x}, this would be
              excessive specialization for an Any slot.
            */
            int ok=1;
            jl_methlist_t *curr = mt->defs;
            while (curr != NULL) {
                jl_value_t *slottype = nth_slot_type(curr->sig, i);
                if (slottype &&
                    !very_general_type(slottype) &&
                    jl_type_intersection(slottype,
                                         (jl_value_t*)jl_type_type) !=
                    (jl_value_t*)jl_bottom_type) {
                    ok=0;
                    break;
                }
                curr = curr->next;
            }
            if (ok) {
                jl_tupleset(type, i, (jl_value_t*)jl_typetype_type);
            }
        }
    }

    // for varargs methods, only specialize up to max_args.
    // in general, here we want to find the biggest type that's not a
    // supertype of any other method signatures. so far we are conservative
    // and the types we find should be bigger.
    if (type->length > jl_unbox_long(mt->max_args) &&
        jl_is_seq_type(jl_tupleref(decl,decl->length-1))) {
        size_t nspec = jl_unbox_long(mt->max_args)+2;
        jl_tuple_t *limited = jl_alloc_tuple(nspec);
        for(i=0; i < nspec-1; i++) {
            jl_tupleset(limited, i, jl_tupleref(type, i));
        }
        jl_value_t *lasttype = jl_tupleref(type,i-1);
        // if all subsequent arguments are subtypes of lasttype, specialize
        // on that instead of decl. for example, if decl is
        // (Any...)
        // and type is
        // (Symbol, Symbol, Symbol)
        // then specialize as (Symbol...), but if type is
        // (Symbol, Int32, Expr)
        // then specialize as (Any...)
        size_t j = i;
        int all_are_subtypes=1;
        for(; j < type->length; j++) {
            if (!jl_subtype(jl_tupleref(type,j), lasttype, 0)) {
                all_are_subtypes = 0;
                break;
            }
        }
        type = limited;
        if (all_are_subtypes) {
            // avoid Type{Type{...}...}...
            if (jl_is_type_type(lasttype))
                lasttype = (jl_value_t*)jl_type_type;
            temp = (jl_value_t*)jl_tuple1(lasttype);
            jl_tupleset(type, i, jl_apply_type((jl_value_t*)jl_seq_type,
                                               (jl_tuple_t*)temp));
        }
        else {
            jl_value_t *lastdeclt = jl_tupleref(decl,decl->length-1);
            if (sparams->length > 0) {
                lastdeclt = (jl_value_t*)
                    jl_instantiate_type_with((jl_type_t*)lastdeclt,
                                             sparams->data,
                                             sparams->length/2);
            }
            jl_tupleset(type, i, lastdeclt);
        }
        // now there is a problem: the computed signature is more
        // general than just the given arguments, so it might conflict
        // with another definition that doesn't have cache instances yet.
        // to fix this, we insert dummy cache entries for all intersections
        // of this signature and definitions. those dummy entries will
        // supersede this one in conflicted cases, alerting us that there
        // should actually be a cache miss.
        need_dummy_entries = 1;
    }

    if (need_dummy_entries) {
        temp = ml_matches(mt->defs, (jl_value_t*)type, lambda_sym, -1);
        for(i=0; i < jl_array_len(temp); i++) {
            jl_value_t *m = jl_cellref(temp, i);
            if (jl_tupleref(m,2) != (jl_value_t*)method->linfo) {
                jl_method_cache_insert(mt, (jl_tuple_t*)jl_tupleref(m, 0),
                                       NULL);
            }
        }
    }

    // here we infer types and specialize the method
    /*
    if (sparams==jl_null)
        newmeth = method;
    else
    */
    jl_array_t *lilist=NULL;
    jl_lambda_info_t *li=NULL;
    if (method->linfo && method->linfo->specializations!=NULL) {
        // reuse code already generated for this combination of lambda and
        // arguments types. this happens for inner generic functions where
        // a new closure is generated on each call to the enclosing function.
        lilist = method->linfo->specializations;
        int k;
        for(k=0; k < lilist->length; k++) {
            li = (jl_lambda_info_t*)jl_cellref(lilist, k);
            if (jl_types_equal(li->specTypes, (jl_value_t*)type))
                break;
        }
        if (k == lilist->length) lilist=NULL;
    }
    if (lilist != NULL && !li->inInference) {
        assert(li);
        newmeth = jl_reinstantiate_method(method, li);
        (void)jl_method_cache_insert(mt, type, newmeth);
        JL_GC_POP();
        return newmeth;
    }
    else {
        newmeth = jl_instantiate_method(method, sparams);
    }
    /*
      if "method" itself can ever be compiled, for example for use as
      an unspecialized method (see below), then newmeth->fptr might point
      to some slow compiled code instead of jl_trampoline, meaning our
      type-inferred code would never get compiled. this can be fixed with
      the commented-out snippet below.
    */
    assert(!(newmeth->linfo && newmeth->linfo->ast) ||
           newmeth->fptr == &jl_trampoline);
    /*
    if (newmeth->linfo&&newmeth->linfo->ast&&newmeth->fptr!=&jl_trampoline) {
        newmeth->fptr = &jl_trampoline;
    }
    */

    (void)jl_method_cache_insert(mt, type, newmeth);

    if (newmeth->linfo != NULL && newmeth->linfo->sparams == jl_null) {
        // when there are no static parameters, one unspecialized version
        // of a function can be shared among all cached specializations.
        if (method->linfo->unspecialized == NULL) {
            method->linfo->unspecialized =
                jl_instantiate_method(method, jl_null);
        }
        newmeth->linfo->unspecialized = method->linfo->unspecialized;
    }

    if (newmeth->linfo != NULL && newmeth->linfo->ast != NULL) {
        newmeth->linfo->specTypes = (jl_value_t*)type;
        jl_array_t *spe = method->linfo->specializations;
        if (spe == NULL) {
            spe = jl_alloc_cell_1d(1);
            jl_cellset(spe, 0, newmeth->linfo);
        }
        else {
            jl_cell_1d_push(spe, (jl_value_t*)newmeth->linfo);
        }
        method->linfo->specializations = spe;
        jl_type_infer(newmeth->linfo, type, method->linfo);
    }
    JL_GC_POP();
    return newmeth;
}