// Convert a type-parameter environment laid out as (typevar, value, ...) pairs
// into the corresponding (name-symbol, value, ...) pairs.
jl_tuple_t *jl_tuple_tvars_to_symbols(jl_tuple_t *t)
{
    jl_tuple_t *s = jl_alloc_tuple_uninit(jl_tuple_len(t));
    size_t i;
    for(i=0; i < jl_tuple_len(s); i+=2) {
        assert(jl_is_typevar(jl_tupleref(t,i)));
        jl_tupleset(s, i, (jl_value_t*)((jl_tvar_t*)jl_tupleref(t,i))->name);
        jl_tupleset(s, i+1, jl_tupleref(t,i+1));
    }
    return s;
}
void jl_add_constructors(jl_datatype_t *t)
{
    if (t->name == jl_array_typename) {
        t->fptr = jl_f_no_function;
        return;
    }

    jl_initialize_generic_function((jl_function_t*)t, t->name->name);

    if (t->name->ctor_factory == (jl_value_t*)jl_nothing ||
        t->name->ctor_factory == (jl_value_t*)jl_null) {
    }
    else {
        assert(jl_tuple_len(t->parameters) > 0);
        if (t == (jl_datatype_t*)t->name->primary)
            return;
        jl_function_t *cfactory = NULL;
        jl_tuple_t *env = NULL;
        JL_GC_PUSH2(&cfactory, &env);
        if (jl_compileropts.compile_enabled) {
            // instantiating
            assert(jl_is_function(t->name->ctor_factory));
            // add type's static parameters to the ctor factory
            size_t np = jl_tuple_len(t->parameters);
            env = jl_alloc_tuple_uninit(np*2);
            for(size_t i=0; i < np; i++) {
                jl_tupleset(env, i*2+0,
                            jl_tupleref(((jl_datatype_t*)t->name->primary)->parameters, i));
                jl_tupleset(env, i*2+1, jl_tupleref(t->parameters, i));
            }
            cfactory = jl_instantiate_method((jl_function_t*)t->name->ctor_factory, env);
            cfactory->linfo->ast = jl_prepare_ast(cfactory->linfo,
                                                  cfactory->linfo->sparams);
        }
        else {
            cfactory = ((jl_datatype_t*)t)->name->static_ctor_factory;
            if (cfactory == NULL) {
                JL_PRINTF(JL_STDERR, "code missing for type %s\n", t->name->name);
                exit(1);
            }
            // in generically-compiled case, pass static parameters via closure
            // environment.
            env = jl_tuple_append((jl_tuple_t*)cfactory->env, t->parameters);
            cfactory = jl_new_closure(cfactory->fptr, (jl_value_t*)env, cfactory->linfo);
        }
        // call user-defined constructor factory on (type,)
        jl_value_t *cfargs[1] = { (jl_value_t*)t };
        jl_apply(cfactory, cfargs, 1);
        JL_GC_POP();
    }
}
jl_tuple_t *jl_tuple2(void *a, void *b)
{
#ifdef OVERLAP_TUPLE_LEN
    jl_tuple_t *t = (jl_tuple_t*)alloc_3w();
#else
    jl_tuple_t *t = (jl_tuple_t*)alloc_4w();
#endif
    t->type = (jl_value_t*)jl_tuple_type;
    jl_tuple_set_len_unsafe(t, 2);
    jl_tupleset(t, 0, a);
    jl_tupleset(t, 1, b);
    return t;
}
jl_tuple_t *jl_tuple_append(jl_tuple_t *a, jl_tuple_t *b)
{
    jl_tuple_t *c = jl_alloc_tuple_uninit(jl_tuple_len(a) + jl_tuple_len(b));
    size_t i=0, j;
    for(j=0; j < jl_tuple_len(a); j++) {
        jl_tupleset(c, i, jl_tupleref(a,j));
        i++;
    }
    for(j=0; j < jl_tuple_len(b); j++) {
        jl_tupleset(c, i, jl_tupleref(b,j));
        i++;
    }
    return c;
}
jl_tuple_t *jl_tuple_append(jl_tuple_t *a, jl_tuple_t *b)
{
    jl_tuple_t *c = jl_alloc_tuple_uninit(a->length + b->length);
    size_t i=0, j;
    for(j=0; j < a->length; j++) {
        jl_tupleset(c, i, jl_tupleref(a,j));
        i++;
    }
    for(j=0; j < b->length; j++) {
        jl_tupleset(c, i, jl_tupleref(b,j));
        i++;
    }
    return c;
}
jl_tuple_t *jl_tuple1(void *a)
{
    jl_tuple_t *t = (jl_tuple_t*)alloc_3w();
    t->type = (jl_type_t*)jl_tuple_type;
    jl_tuple_set_len_unsafe(t, 1);
    jl_tupleset(t, 0, a);
    return t;
}
jl_tuple_t *jl_tuple1(void *a)
{
    jl_tuple_t *t = (jl_tuple_t*)alloc_3w();
    t->type = (jl_type_t*)jl_tuple_type;
    t->length = 1;
    jl_tupleset(t, 0, a);
    return t;
}
jl_tuple_t *jl_tuple_fill(size_t n, jl_value_t *v)
{
    if (n == 0) return jl_null;
    jl_tuple_t *tup = jl_alloc_tuple_uninit(n);
    for(size_t i=0; i < n; i++) {
        jl_tupleset(tup, i, v);
    }
    return tup;
}
jl_tuple_t *jl_alloc_tuple(size_t n)
{
    if (n == 0) return jl_null;
    jl_tuple_t *jv = jl_alloc_tuple_uninit(n);
    for(size_t i=0; i < n; i++) {
        jl_tupleset(jv, i, NULL);
    }
    return jv;
}
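/*
 * A minimal usage sketch for the tuple helpers above (illustrative only):
 * `tuple_demo` is a hypothetical caller, not part of the runtime. It assumes
 * the legacy pre-0.4 jl_tuple_t API shown in this file plus jl_box_int64 from
 * the boxing API. GC rooting (JL_GC_PUSH*) is omitted for brevity.
 */
#include <assert.h>
#include "julia.h"

static void tuple_demo(void)
{
    // build (1, 2) and (3,), then concatenate to (1, 2, 3)
    jl_tuple_t *a = jl_tuple2(jl_box_int64(1), jl_box_int64(2));
    jl_tuple_t *b = jl_tuple1(jl_box_int64(3));
    jl_tuple_t *c = jl_tuple_append(a, b);
    assert(jl_tuple_len(c) == 3);

    // make a 3-element tuple whose slots all hold the first element of c
    jl_tuple_t *d = jl_tuple_fill(jl_tuple_len(c), jl_tupleref(c, 0));
    assert(jl_tuple_len(d) == 3);
    (void)d;
}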
void jl_add_constructors(jl_struct_type_t *t)
{
    if (t->name == jl_array_typename) {
        t->fptr = jl_f_no_function;
        return;
    }

    jl_initialize_generic_function((jl_function_t*)t, t->name->name);

    if (t->ctor_factory == (jl_value_t*)jl_nothing ||
        t->ctor_factory == (jl_value_t*)jl_null) {
        assert(jl_tuple_len(t->parameters) == 0);
    }
    else {
        assert(jl_tuple_len(t->parameters) > 0);
        if (t != (jl_struct_type_t*)t->name->primary) {
            // instantiating
            assert(jl_is_function(t->ctor_factory));
            // add type's static parameters to the ctor factory
            size_t np = jl_tuple_len(t->parameters);
            jl_tuple_t *sparams = jl_alloc_tuple_uninit(np*2);
            jl_function_t *cfactory = NULL;
            JL_GC_PUSH(&sparams, &cfactory);
            size_t i;
            for(i=0; i < np; i++) {
                jl_tupleset(sparams, i*2+0,
                            jl_tupleref(((jl_struct_type_t*)t->name->primary)->parameters, i));
                jl_tupleset(sparams, i*2+1, jl_tupleref(t->parameters, i));
            }
            cfactory = jl_instantiate_method((jl_function_t*)t->ctor_factory, sparams);
            cfactory->linfo->ast = jl_prepare_ast(cfactory->linfo,
                                                  cfactory->linfo->sparams);
            // call user-defined constructor factory on (type,)
            jl_value_t *cfargs[1] = { (jl_value_t*)t };
            jl_apply(cfactory, cfargs, 1);
            JL_GC_POP();
        }
    }
}
static jl_value_t *jl_new_bits_internal(jl_value_t *dt, void *data, size_t *len)
{
    if (jl_is_tuple(dt)) {
        // reconstruct each element recursively, respecting alignment
        jl_tuple_t *tuple = (jl_tuple_t*)dt;
        *len = LLT_ALIGN(*len, jl_new_bits_align(dt));
        size_t i, l = jl_tuple_len(tuple);
        jl_value_t *v = (jl_value_t*)jl_alloc_tuple(l);
        JL_GC_PUSH1(&v);
        for (i = 0; i < l; i++) {
            jl_tupleset(v, i, jl_new_bits_internal(jl_tupleref(tuple,i),
                                                   (char*)data, len));
        }
        JL_GC_POP();
        return v;
    }
    jl_datatype_t *bt = (jl_datatype_t*)dt;
    size_t nb = jl_datatype_size(bt);
    if (nb == 0)
        return jl_new_struct_uninit(bt);
    *len = LLT_ALIGN(*len, bt->alignment);
    data = (char*)data + (*len);
    *len += nb;
    // fast paths for the most common bits types
    if (bt == jl_uint8_type)   return jl_box_uint8(*(uint8_t*)data);
    if (bt == jl_int64_type)   return jl_box_int64(*(int64_t*)data);
    if (bt == jl_bool_type)    return (*(int8_t*)data) ? jl_true : jl_false;
    if (bt == jl_int32_type)   return jl_box_int32(*(int32_t*)data);
    if (bt == jl_float64_type) return jl_box_float64(*(double*)data);

    jl_value_t *v = (jl_value_t*)allocobj((NWORDS(LLT_ALIGN(nb,sizeof(void*)))+1) *
                                          sizeof(void*));
    v->type = (jl_value_t*)bt;
    switch (nb) {
    case  1: *(int8_t*)   jl_data_ptr(v) = *(int8_t*)data;    break;
    case  2: *(int16_t*)  jl_data_ptr(v) = *(int16_t*)data;   break;
    case  4: *(int32_t*)  jl_data_ptr(v) = *(int32_t*)data;   break;
    case  8: *(int64_t*)  jl_data_ptr(v) = *(int64_t*)data;   break;
    case 16: *(bits128_t*)jl_data_ptr(v) = *(bits128_t*)data; break;
    default: memcpy(jl_data_ptr(v), data, nb);
    }
    return v;
}
static jl_value_t *jl_deserialize_value(ios_t *s)
{
    int pos = ios_pos(s);
    int32_t tag = read_uint8(s);
    if (tag == Null_tag)
        return NULL;
    if (tag == 0) {
        tag = read_uint8(s);
        return (jl_value_t*)ptrhash_get(&deser_tag, (void*)(ptrint_t)tag);
    }
    if (tag == BackRef_tag) {
        assert(tree_literal_values == NULL);
        ptrint_t offs = read_int32(s);
        void **bp = ptrhash_bp(&backref_table, (void*)(ptrint_t)offs);
        assert(*bp != HT_NOTFOUND);
        return (jl_value_t*)*bp;
    }

    jl_value_t *vtag = (jl_value_t*)ptrhash_get(&deser_tag, (void*)(ptrint_t)tag);
    if (tag >= VALUE_TAGS) {
        return vtag;
    }

    int usetable = (tree_literal_values == NULL);

    size_t i;
    if (vtag == (jl_value_t*)jl_tuple_type ||
        vtag == (jl_value_t*)LongTuple_tag) {
        size_t len;
        if (vtag == (jl_value_t*)jl_tuple_type)
            len = read_uint8(s);
        else
            len = read_int32(s);
        jl_tuple_t *tu = jl_alloc_tuple_uninit(len);
        if (usetable)
            ptrhash_put(&backref_table, (void*)(ptrint_t)pos, (jl_value_t*)tu);
        for(i=0; i < len; i++)
            jl_tupleset(tu, i, jl_deserialize_value(s));
        return (jl_value_t*)tu;
    }
    else if (vtag == (jl_value_t*)jl_symbol_type ||
             vtag == (jl_value_t*)LongSymbol_tag) {
        size_t len;
        if (vtag == (jl_value_t*)jl_symbol_type)
            len = read_uint8(s);
        else
            len = read_int32(s);
        char *name = alloca(len+1);
        ios_read(s, name, len);
        name[len] = '\0';
        jl_value_t *s = (jl_value_t*)jl_symbol(name);
        if (usetable)
            ptrhash_put(&backref_table, (void*)(ptrint_t)pos, s);
        return s;
    }
    else if (vtag == (jl_value_t*)jl_array_type) {
        jl_value_t *aty = jl_deserialize_value(s);
        jl_value_t *elty = jl_tparam0(aty);
        int16_t ndims = jl_unbox_long(jl_tparam1(aty));
        size_t *dims = alloca(ndims*sizeof(size_t));
        for(i=0; i < ndims; i++)
            dims[i] = jl_unbox_long(jl_deserialize_value(s));
        jl_array_t *a = jl_new_array_((jl_type_t*)aty, ndims, dims);
        if (usetable)
            ptrhash_put(&backref_table, (void*)(ptrint_t)pos, (jl_value_t*)a);
        if (jl_is_bits_type(elty)) {
            size_t tot = a->length * a->elsize;
            ios_read(s, a->data, tot);
        }
        else {
            for(i=0; i < a->length; i++) {
                ((jl_value_t**)a->data)[i] = jl_deserialize_value(s);
            }
        }
        return (jl_value_t*)a;
    }
    else if (vtag == (jl_value_t*)jl_expr_type ||
             vtag == (jl_value_t*)LongExpr_tag) {
        size_t len;
        if (vtag == (jl_value_t*)jl_expr_type)
            len = read_uint8(s);
        else
            len = read_int32(s);
        jl_expr_t *e = jl_exprn((jl_sym_t*)jl_deserialize_value(s), len);
        if (usetable)
            ptrhash_put(&backref_table, (void*)(ptrint_t)pos, (jl_value_t*)e);
        e->etype = jl_deserialize_value(s);
        for(i=0; i < len; i++) {
            jl_cellset(e->args, i, jl_deserialize_value(s));
        }
        return (jl_value_t*)e;
    }
    else if (vtag == (jl_value_t*)LiteralVal_tag) {
        return jl_cellref(tree_literal_values, read_uint16(s));
    }
    else if (vtag == (jl_value_t*)jl_tvar_type) {
        jl_tvar_t *tv = (jl_tvar_t*)newobj((jl_type_t*)jl_tvar_type, 4);
        if (usetable)
            ptrhash_put(&backref_table, (void*)(ptrint_t)pos, tv);
        tv->name = (jl_sym_t*)jl_deserialize_value(s);
        tv->lb = jl_deserialize_value(s);
        tv->ub = jl_deserialize_value(s);
        tv->bound = read_int8(s);
        return (jl_value_t*)tv;
    }
    else if (vtag == (jl_value_t*)jl_func_kind) {
        jl_value_t *ftype = jl_deserialize_value(s);
        jl_function_t *f = (jl_function_t*)newobj((jl_type_t*)ftype, 3);
        if (usetable)
            ptrhash_put(&backref_table, (void*)(ptrint_t)pos, f);
        f->linfo = (jl_lambda_info_t*)jl_deserialize_value(s);
        f->env = jl_deserialize_value(s);
        f->fptr = jl_deserialize_fptr(s);
        if (f->fptr == NULL) {
            f->fptr = &jl_trampoline;
        }
        return (jl_value_t*)f;
    }
    else if (vtag == (jl_value_t*)jl_lambda_info_type) {
        jl_lambda_info_t *li =
            (jl_lambda_info_t*)newobj((jl_type_t*)jl_lambda_info_type,
                                      LAMBDA_INFO_NW);
        if (usetable)
            ptrhash_put(&backref_table, (void*)(ptrint_t)pos, li);
        li->ast = jl_deserialize_value(s);
        li->sparams = (jl_tuple_t*)jl_deserialize_value(s);
        li->tfunc = jl_deserialize_value(s);
        li->name = (jl_sym_t*)jl_deserialize_value(s);
        li->specTypes = jl_deserialize_value(s);
        li->specializations = (jl_array_t*)jl_deserialize_value(s);
        li->inferred = jl_deserialize_value(s);
        li->file = jl_deserialize_value(s);
        li->line = jl_deserialize_value(s);
        li->module = (jl_module_t*)jl_deserialize_value(s);

        li->fptr = NULL;
        li->roots = NULL;
        li->functionObject = NULL;
        li->inInference = 0;
        li->inCompile = 0;
        li->unspecialized = NULL;
        return (jl_value_t*)li;
    }
    else if (vtag == (jl_value_t*)jl_module_type) {
        jl_module_t *m = jl_new_module(anonymous_sym);
        if (usetable)
            ptrhash_put(&backref_table, (void*)(ptrint_t)pos, m);
        m->name = (jl_sym_t*)jl_deserialize_value(s);
        while (1) {
            jl_value_t *name = jl_deserialize_value(s);
            if (name == NULL)
                break;
            jl_binding_t *b = jl_get_binding_wr(m, (jl_sym_t*)name);
            b->value = jl_deserialize_value(s);
            b->type = (jl_type_t*)jl_deserialize_value(s);
            b->constp = read_int8(s);
            b->exportp = read_int8(s);
        }
        while (1) {
            jl_value_t *name = jl_deserialize_value(s);
            if (name == NULL)
                break;
            jl_set_expander(m, (jl_sym_t*)name,
                            (jl_function_t*)jl_deserialize_value(s));
        }
        return (jl_value_t*)m;
    }
    else if (vtag == (jl_value_t*)jl_methtable_type) {
        jl_methtable_t *mt = (jl_methtable_t*)allocobj(sizeof(jl_methtable_t));
        if (usetable)
            ptrhash_put(&backref_table, (void*)(ptrint_t)pos, mt);
        mt->type = (jl_type_t*)jl_methtable_type;
        mt->defs = jl_deserialize_methlist(s);
        mt->cache = jl_deserialize_methlist(s);
        mt->cache_1arg = (jl_array_t*)jl_deserialize_value(s);
        mt->max_args = read_int32(s);
        return (jl_value_t*)mt;
    }
    else if (vtag == (jl_value_t*)SmallInt64_tag) {
        jl_value_t *v = jl_box_int64(read_int32(s));
        if (usetable)
            ptrhash_put(&backref_table, (void*)(ptrint_t)pos, v);
        return v;
    }
    else if (vtag == (jl_value_t*)jl_bits_kind) {
        jl_bits_type_t *bt = (jl_bits_type_t*)jl_deserialize_value(s);
        int nby = bt->nbits/8;
        char *data = alloca(nby);
        ios_read(s, data, nby);
        jl_value_t *v = NULL;
        if (bt == jl_int32_type)
            v = jl_box_int32(*(int32_t*)data);
        else if (bt == jl_int64_type)
            v = jl_box_int64(*(int64_t*)data);
        else if (bt == jl_bool_type)
            v = jl_box_bool(*(int8_t*)data);
        else {
            switch (bt->nbits) {
            case  8: v = jl_box8 (bt, *(int8_t*) data); break;
            case 16: v = jl_box16(bt, *(int16_t*)data); break;
            case 32: v = jl_box32(bt, *(int32_t*)data); break;
            case 64: v = jl_box64(bt, *(int64_t*)data); break;
            default:
                v = (jl_value_t*)allocobj(sizeof(void*)+nby);
                v->type = (jl_type_t*)bt;
                memcpy(jl_bits_data(v), data, nby);
            }
        }
        if (usetable)
            ptrhash_put(&backref_table, (void*)(ptrint_t)pos, v);
        return v;
    }
    else if (vtag == (jl_value_t*)jl_struct_kind) {
        jl_struct_type_t *typ = (jl_struct_type_t*)jl_deserialize_value(s);
        if (typ == jl_struct_kind || typ == jl_bits_kind)
            return jl_deserialize_tag_type(s, typ, pos);
        size_t nf = typ->names->length;
        jl_value_t *v = jl_new_struct_uninit(typ);
        if (usetable)
            ptrhash_put(&backref_table, (void*)(ptrint_t)pos, v);
        for(i=0; i < nf; i++) {
            ((jl_value_t**)v)[i+1] = jl_deserialize_value(s);
        }
        // TODO: put WeakRefs on the weak_refs list
        return v;
    }
    else if (vtag == (jl_value_t*)jl_tag_kind) {
        return jl_deserialize_tag_type(s, jl_tag_kind, pos);
    }
    assert(0);
    return NULL;
}
static jl_value_t *jl_deserialize_value(ios_t *s)
{
    int pos = ios_pos(s);
    int32_t tag = read_uint8(s);
    if (tag == Null_tag)
        return NULL;
    if (tag == 0) {
        tag = read_uint8(s);
        jl_value_t *v = ptrhash_get(&deser_tag, (void*)(ptrint_t)tag);
        assert(v != HT_NOTFOUND);
        return v;
    }
    if (tag == BackRef_tag || tag == ShortBackRef_tag) {
        assert(tree_literal_values == NULL);
        ptrint_t offs = (tag == BackRef_tag) ? read_int32(s) : read_uint16(s);
        void **bp = ptrhash_bp(&backref_table, (void*)(ptrint_t)offs);
        assert(*bp != HT_NOTFOUND);
        return (jl_value_t*)*bp;
    }

    jl_value_t *vtag = (jl_value_t*)ptrhash_get(&deser_tag, (void*)(ptrint_t)tag);
    if (tag >= VALUE_TAGS) {
        return vtag;
    }

    int usetable = (tree_literal_values == NULL);

    size_t i;
    if (vtag == (jl_value_t*)jl_tuple_type ||
        vtag == (jl_value_t*)LongTuple_tag) {
        size_t len;
        if (vtag == (jl_value_t*)jl_tuple_type)
            len = read_uint8(s);
        else
            len = read_int32(s);
        jl_tuple_t *tu = jl_alloc_tuple_uninit(len);
        if (usetable)
            ptrhash_put(&backref_table, (void*)(ptrint_t)pos, (jl_value_t*)tu);
        for(i=0; i < len; i++)
            jl_tupleset(tu, i, jl_deserialize_value(s));
        return (jl_value_t*)tu;
    }
    else if (vtag == (jl_value_t*)jl_symbol_type ||
             vtag == (jl_value_t*)LongSymbol_tag) {
        size_t len;
        if (vtag == (jl_value_t*)jl_symbol_type)
            len = read_uint8(s);
        else
            len = read_int32(s);
        char *name = alloca(len+1);
        ios_read(s, name, len);
        name[len] = '\0';
        jl_value_t *s = (jl_value_t*)jl_symbol(name);
        if (usetable)
            ptrhash_put(&backref_table, (void*)(ptrint_t)pos, s);
        return s;
    }
    else if (vtag == (jl_value_t*)jl_array_type) {
        jl_value_t *aty = jl_deserialize_value(s);
        jl_value_t *elty = jl_tparam0(aty);
        int16_t ndims = jl_unbox_long(jl_tparam1(aty));
        size_t *dims = alloca(ndims*sizeof(size_t));
        for(i=0; i < ndims; i++)
            dims[i] = jl_unbox_long(jl_deserialize_value(s));
        jl_array_t *a = jl_new_array_((jl_type_t*)aty, ndims, dims);
        if (usetable)
            ptrhash_put(&backref_table, (void*)(ptrint_t)pos, (jl_value_t*)a);
        if (jl_is_bits_type(elty)) {
            size_t tot = jl_array_len(a) * a->elsize;
            ios_read(s, jl_array_data(a), tot);
        }
        else {
            for(i=0; i < jl_array_len(a); i++) {
                ((jl_value_t**)a->data)[i] = jl_deserialize_value(s);
            }
        }
        return (jl_value_t*)a;
    }
    else if (vtag == (jl_value_t*)jl_expr_type ||
             vtag == (jl_value_t*)LongExpr_tag) {
        size_t len;
        if (vtag == (jl_value_t*)jl_expr_type)
            len = read_uint8(s);
        else
            len = read_int32(s);
        jl_expr_t *e = jl_exprn((jl_sym_t*)jl_deserialize_value(s), len);
        if (usetable)
            ptrhash_put(&backref_table, (void*)(ptrint_t)pos, (jl_value_t*)e);
        e->etype = jl_deserialize_value(s);
        for(i=0; i < len; i++) {
            jl_cellset(e->args, i, jl_deserialize_value(s));
        }
        return (jl_value_t*)e;
    }
    else if (vtag == (jl_value_t*)LiteralVal_tag) {
        return jl_cellref(tree_literal_values, read_uint16(s));
    }
    else if (vtag == (jl_value_t*)jl_tvar_type) {
        jl_tvar_t *tv = (jl_tvar_t*)newobj((jl_type_t*)jl_tvar_type, 4);
        if (usetable)
            ptrhash_put(&backref_table, (void*)(ptrint_t)pos, tv);
        tv->name = (jl_sym_t*)jl_deserialize_value(s);
        tv->lb = jl_deserialize_value(s);
        tv->ub = jl_deserialize_value(s);
        tv->bound = read_int8(s);
        return (jl_value_t*)tv;
    }
    else if (vtag == (jl_value_t*)jl_function_type) {
        jl_function_t *f =
            (jl_function_t*)newobj((jl_type_t*)jl_function_type, 3);
        if (usetable)
            ptrhash_put(&backref_table, (void*)(ptrint_t)pos, f);
        f->linfo = (jl_lambda_info_t*)jl_deserialize_value(s);
        f->env = jl_deserialize_value(s);
        f->fptr = jl_deserialize_fptr(s);
        return (jl_value_t*)f;
    }
    else if (vtag == (jl_value_t*)jl_lambda_info_type) {
        jl_lambda_info_t *li =
            (jl_lambda_info_t*)newobj((jl_type_t*)jl_lambda_info_type,
                                      LAMBDA_INFO_NW);
        if (usetable)
            ptrhash_put(&backref_table, (void*)(ptrint_t)pos, li);
        li->ast = jl_deserialize_value(s);
        li->sparams = (jl_tuple_t*)jl_deserialize_value(s);
        li->tfunc = jl_deserialize_value(s);
        li->name = (jl_sym_t*)jl_deserialize_value(s);
        li->specTypes = (jl_tuple_t*)jl_deserialize_value(s);
        li->specializations = (jl_array_t*)jl_deserialize_value(s);
        li->inferred = read_int8(s);
        li->file = jl_deserialize_value(s);
        li->line = read_int32(s);
        li->module = (jl_module_t*)jl_deserialize_value(s);
        li->roots = (jl_array_t*)jl_deserialize_value(s);
        li->def = (jl_lambda_info_t*)jl_deserialize_value(s);
        li->capt = jl_deserialize_value(s);

        li->fptr = &jl_trampoline;
        li->functionObject = NULL;
        li->cFunctionObject = NULL;
        li->inInference = 0;
        li->inCompile = 0;
        li->unspecialized = NULL;
        return (jl_value_t*)li;
    }
    else if (vtag == (jl_value_t*)jl_module_type) {
        jl_sym_t *mname = (jl_sym_t*)jl_deserialize_value(s);
        jl_module_t *m = jl_new_module(mname);
        if (usetable)
            ptrhash_put(&backref_table, (void*)(ptrint_t)pos, m);
        m->parent = (jl_module_t*)jl_deserialize_value(s);
        while (1) {
            jl_value_t *name = jl_deserialize_value(s);
            if (name == NULL)
                break;
            jl_binding_t *b = jl_get_binding_wr(m, (jl_sym_t*)name);
            b->value = jl_deserialize_value(s);
            b->type = (jl_type_t*)jl_deserialize_value(s);
            b->owner = (jl_module_t*)jl_deserialize_value(s);
            int8_t flags = read_int8(s);
            b->constp = (flags>>2) & 1;
            b->exportp = (flags>>1) & 1;
            b->imported = (flags) & 1;
        }
        size_t ni = read_int32(s);
        for(size_t i=0; i < ni; i++) {
            arraylist_push(&m->usings, jl_deserialize_value(s));
        }
        return (jl_value_t*)m;
    }
DLLEXPORT jl_value_t *jl_method_def(jl_sym_t *name, jl_value_t **bp, jl_binding_t *bnd,
                                    jl_tuple_t *argtypes, jl_function_t *f,
                                    jl_value_t *isstaged,
                                    jl_value_t *call_func, int iskw)
{
    // argtypes is a tuple ((types...), (typevars...))
    jl_tuple_t *t = (jl_tuple_t*)jl_t1(argtypes);
    argtypes = (jl_tuple_t*)jl_t0(argtypes);

    jl_value_t *gf = NULL;
    JL_GC_PUSH3(&gf, &argtypes, &t);

    if (bnd && bnd->value != NULL && !bnd->constp) {
        jl_errorf("cannot define function %s; it already has a value",
                  bnd->name->name);
    }
    if (*bp != NULL) {
        gf = *bp;
        if (!jl_is_gf(gf)) {
            if (jl_is_datatype(gf)) {
                // DataType: define `call`, for backwards compat with outer constructors
                if (call_func == NULL)
                    call_func = (jl_value_t*)jl_module_call_func(jl_current_module);
                size_t na = jl_tuple_len(argtypes);
                jl_tuple_t *newargtypes = jl_alloc_tuple(1 + na);
                JL_GC_PUSH1(&newargtypes);
                size_t i = 0;
                if (iskw) {
                    assert(na > 0);
                    // for kw sorter, keep container argument first
                    jl_tupleset(newargtypes, 0, jl_tupleref(argtypes, 0));
                    i++;
                }
                jl_tupleset(newargtypes, i, jl_wrap_Type(gf));
                i++;
                for(; i < na+1; i++) {
                    jl_tupleset(newargtypes, i, jl_tupleref(argtypes, i-1));
                }
                argtypes = newargtypes;
                JL_GC_POP();
                gf = call_func;
                name = call_sym;
                // edit args, insert type first
                if (!jl_is_expr(f->linfo->ast))
                    f->linfo->ast = jl_uncompress_ast(f->linfo, f->linfo->ast);
                jl_array_t *al = jl_lam_args((jl_expr_t*)f->linfo->ast);
                if (jl_array_len(al) == 0) {
                    al = jl_alloc_cell_1d(1);
                    jl_exprarg(f->linfo->ast, 0) = (jl_value_t*)al;
                }
                else {
                    jl_array_grow_beg(al, 1);
                }
                if (iskw) {
                    jl_cellset(al, 0, jl_cellref(al, 1));
                    jl_cellset(al, 1, (jl_value_t*)jl_gensym());
                }
                else {
                    jl_cellset(al, 0, (jl_value_t*)jl_gensym());
                }
            }
            if (!jl_is_gf(gf)) {
                jl_error("invalid method definition: not a generic function");
            }
        }
        if (iskw) {
            bp = (jl_value_t**)&((jl_methtable_t*)((jl_function_t*)gf)->env)->kwsorter;
            gf = *bp;
        }
    }

    size_t na = jl_tuple_len(argtypes);
    for(size_t i=0; i < na; i++) {
        jl_value_t *elt = jl_tupleref(argtypes, i);
        if (!jl_is_type(elt) && !jl_is_typevar(elt)) {
            jl_lambda_info_t *li = f->linfo;
            jl_errorf("invalid type for argument %s in method definition for %s at %s:%d",
                      jl_lam_argname(li,i)->name, name->name,
                      li->file->name, li->line);
        }
    }

    int ishidden = !!strchr(name->name, '#');
    for(size_t i=0; i < jl_tuple_len(t); i++) {
        jl_value_t *tv = jl_tupleref(t, i);
        if (!jl_is_typevar(tv))
            jl_type_error_rt(name->name, "method definition",
                             (jl_value_t*)jl_tvar_type, tv);
        if (!ishidden && !type_contains((jl_value_t*)argtypes, tv)) {
            JL_PRINTF(JL_STDERR,
                      "Warning: static parameter %s does not occur in signature for %s",
                      ((jl_tvar_t*)tv)->name->name, name->name);
            print_func_loc(JL_STDERR, f->linfo);
            JL_PRINTF(JL_STDERR, ".\nThe method will not be callable.\n");
        }
    }

    if (bnd) {
        bnd->constp = 1;
    }
    if (*bp == NULL) {
        gf = (jl_value_t*)jl_new_generic_function(name);
        *bp = gf;
    }
    assert(jl_is_function(f));
    assert(jl_is_tuple(argtypes));
    assert(jl_is_tuple(t));

    jl_add_method((jl_function_t*)gf, argtypes, f, t, isstaged == jl_true);
    if (jl_boot_file_loaded &&
        f->linfo && f->linfo->ast && jl_is_expr(f->linfo->ast)) {
        jl_lambda_info_t *li = f->linfo;
        li->ast = jl_compress_ast(li, li->ast);
    }
    JL_GC_POP();
    return gf;
}
static jl_function_t *cache_method(jl_methtable_t *mt, jl_tuple_t *type,
                                   jl_function_t *method, jl_tuple_t *decl,
                                   jl_tuple_t *sparams)
{
    size_t i;
    int need_dummy_entries = 0;
    jl_value_t *temp = NULL;
    jl_function_t *newmeth = NULL;
    JL_GC_PUSH(&type, &temp, &newmeth);

    for (i=0; i < type->length; i++) {
        jl_value_t *elt = jl_tupleref(type, i);
        int set_to_any = 0;
        if (nth_slot_type(decl,i) == jl_ANY_flag) {
            // don't specialize on slots marked ANY
            temp = jl_tupleref(type, i);
            jl_tupleset(type, i, (jl_value_t*)jl_any_type);
            int nintr = 0;
            jl_methlist_t *curr = mt->defs;
            // if this method is the only match even with the current slot
            // set to Any, then it is safe to cache it that way.
            while (curr != NULL && curr->func != method) {
                if (jl_type_intersection((jl_value_t*)curr->sig,
                                         (jl_value_t*)type) !=
                    (jl_value_t*)jl_bottom_type) {
                    nintr++;
                    break;
                }
                curr = curr->next;
            }
            if (nintr) {
                // TODO: even if different specializations of this slot need
                // separate cache entries, have them share code.
                jl_tupleset(type, i, temp);
            }
            else {
                set_to_any = 1;
            }
        }
        if (set_to_any) {
        }
        else if (jl_is_tuple(elt)) {
            /*
              don't cache tuple type exactly; just remember that it was
              a tuple, unless the declaration asks for something more
              specific. determined with a type intersection.
            */
            int might_need_dummy = 0;
            temp = jl_tupleref(type, i);
            if (i < decl->length) {
                jl_value_t *declt = jl_tupleref(decl, i);
                // for T..., intersect with T
                if (jl_is_seq_type(declt))
                    declt = jl_tparam0(declt);
                if (declt == (jl_value_t*)jl_tuple_type ||
                    jl_subtype((jl_value_t*)jl_tuple_type, declt, 0)) {
                    // don't specialize args that matched (Any...) or Any
                    jl_tupleset(type, i, (jl_value_t*)jl_tuple_type);
                    might_need_dummy = 1;
                }
                else {
                    declt = jl_type_intersection(declt, (jl_value_t*)jl_tuple_type);
                    if (((jl_tuple_t*)elt)->length > 3 ||
                        tuple_all_Any((jl_tuple_t*)declt)) {
                        jl_tupleset(type, i, declt);
                        might_need_dummy = 1;
                    }
                }
            }
            else {
                jl_tupleset(type, i, (jl_value_t*)jl_tuple_type);
                might_need_dummy = 1;
            }
            assert(jl_tupleref(type,i) != (jl_value_t*)jl_bottom_type);
            if (might_need_dummy) {
                jl_methlist_t *curr = mt->defs;
                // can't generalize type if there's an overlapping definition
                // with typevars
                while (curr != NULL && curr->func != method) {
                    if (curr->tvars != jl_null &&
                        jl_type_intersection((jl_value_t*)curr->sig,
                                             (jl_value_t*)type) !=
                        (jl_value_t*)jl_bottom_type) {
                        jl_tupleset(type, i, temp);
                        might_need_dummy = 0;
                        break;
                    }
                    curr = curr->next;
                }
            }
            if (might_need_dummy) {
                jl_methlist_t *curr = mt->defs;
                while (curr != NULL && curr->func != method) {
                    jl_tuple_t *sig = curr->sig;
                    if (sig->length > i &&
                        jl_is_tuple(jl_tupleref(sig,i))) {
                        need_dummy_entries = 1;
                        break;
                    }
                    curr = curr->next;
                }
            }
        }
        else if (jl_is_type_type(elt) && jl_is_type_type(jl_tparam0(elt))) {
            /*
              actual argument was Type{...}, we computed its type as
              Type{Type{...}}. we must avoid unbounded nesting here, so
              cache the signature as Type{T}, unless something more
              specific like Type{Type{Int32}} was actually declared.
              this can be determined using a type intersection.
            */
            if (i < decl->length) {
                jl_value_t *declt = jl_tupleref(decl, i);
                // for T..., intersect with T
                if (jl_is_seq_type(declt))
                    declt = jl_tparam0(declt);
                jl_tupleset(type, i,
                            jl_type_intersection(declt,
                                                 (jl_value_t*)jl_typetype_type));
            }
            else {
                jl_tupleset(type, i, (jl_value_t*)jl_typetype_type);
            }
            assert(jl_tupleref(type,i) != (jl_value_t*)jl_bottom_type);
        }
        else if (jl_is_type_type(elt) &&
                 very_general_type(nth_slot_type(decl,i))) {
            /*
              here's a fairly complex heuristic: if this argument slot's
              declared type is Any, and no definition overlaps with Type
              for this slot, then don't specialize for every Type that
              might be passed. Since every type x has its own type Type{x},
              this would be excessive specialization for an Any slot.
            */
            int ok = 1;
            jl_methlist_t *curr = mt->defs;
            while (curr != NULL) {
                jl_value_t *slottype = nth_slot_type(curr->sig, i);
                if (slottype &&
                    !very_general_type(slottype) &&
                    jl_type_intersection(slottype,
                                         (jl_value_t*)jl_type_type) !=
                    (jl_value_t*)jl_bottom_type) {
                    ok = 0;
                    break;
                }
                curr = curr->next;
            }
            if (ok) {
                jl_tupleset(type, i, (jl_value_t*)jl_typetype_type);
            }
        }
    }

    // for varargs methods, only specialize up to max_args.
    // in general, here we want to find the biggest type that's not a
    // supertype of any other method signatures. so far we are conservative
    // and the types we find should be bigger.
    if (type->length > jl_unbox_long(mt->max_args) &&
        jl_is_seq_type(jl_tupleref(decl, decl->length-1))) {
        size_t nspec = jl_unbox_long(mt->max_args) + 2;
        jl_tuple_t *limited = jl_alloc_tuple(nspec);
        for(i=0; i < nspec-1; i++) {
            jl_tupleset(limited, i, jl_tupleref(type, i));
        }
        jl_value_t *lasttype = jl_tupleref(type, i-1);
        // if all subsequent arguments are subtypes of lasttype, specialize
        // on that instead of decl. for example, if decl is
        // (Any...)
        // and type is
        // (Symbol, Symbol, Symbol)
        // then specialize as (Symbol...), but if type is
        // (Symbol, Int32, Expr)
        // then specialize as (Any...)
        size_t j = i;
        int all_are_subtypes = 1;
        for(; j < type->length; j++) {
            if (!jl_subtype(jl_tupleref(type,j), lasttype, 0)) {
                all_are_subtypes = 0;
                break;
            }
        }
        type = limited;
        if (all_are_subtypes) {
            // avoid Type{Type{...}...}...
            if (jl_is_type_type(lasttype))
                lasttype = (jl_value_t*)jl_type_type;
            temp = (jl_value_t*)jl_tuple1(lasttype);
            jl_tupleset(type, i, jl_apply_type((jl_value_t*)jl_seq_type,
                                               (jl_tuple_t*)temp));
        }
        else {
            jl_value_t *lastdeclt = jl_tupleref(decl, decl->length-1);
            if (sparams->length > 0) {
                lastdeclt = (jl_value_t*)
                    jl_instantiate_type_with((jl_type_t*)lastdeclt,
                                             sparams->data,
                                             sparams->length/2);
            }
            jl_tupleset(type, i, lastdeclt);
        }
        // now there is a problem: the computed signature is more
        // general than just the given arguments, so it might conflict
        // with another definition that doesn't have cache instances yet.
        // to fix this, we insert dummy cache entries for all intersections
        // of this signature and definitions. those dummy entries will
        // supersede this one in conflicted cases, alerting us that there
        // should actually be a cache miss.
        need_dummy_entries = 1;
    }

    if (need_dummy_entries) {
        temp = ml_matches(mt->defs, (jl_value_t*)type, lambda_sym, -1);
        for(i=0; i < jl_array_len(temp); i++) {
            jl_value_t *m = jl_cellref(temp, i);
            if (jl_tupleref(m,2) != (jl_value_t*)method->linfo) {
                jl_method_cache_insert(mt, (jl_tuple_t*)jl_tupleref(m, 0), NULL);
            }
        }
    }

    // here we infer types and specialize the method
    /*
    if (sparams==jl_null)
        newmeth = method;
    else
    */
    jl_array_t *lilist = NULL;
    jl_lambda_info_t *li = NULL;
    if (method->linfo && method->linfo->specializations != NULL) {
        // reuse code already generated for this combination of lambda and
        // arguments types. this happens for inner generic functions where
        // a new closure is generated on each call to the enclosing function.
        lilist = method->linfo->specializations;
        int k;
        for(k=0; k < lilist->length; k++) {
            li = (jl_lambda_info_t*)jl_cellref(lilist, k);
            if (jl_types_equal(li->specTypes, (jl_value_t*)type))
                break;
        }
        if (k == lilist->length)
            lilist = NULL;
    }
    if (lilist != NULL && !li->inInference) {
        assert(li);
        newmeth = jl_reinstantiate_method(method, li);
        (void)jl_method_cache_insert(mt, type, newmeth);
        JL_GC_POP();
        return newmeth;
    }
    else {
        newmeth = jl_instantiate_method(method, sparams);
    }
    /*
      if "method" itself can ever be compiled, for example for use as
      an unspecialized method (see below), then newmeth->fptr might point
      to some slow compiled code instead of jl_trampoline, meaning our
      type-inferred code would never get compiled. this can be fixed with
      the commented-out snippet below.
    */
    assert(!(newmeth->linfo && newmeth->linfo->ast) ||
           newmeth->fptr == &jl_trampoline);
    /*
    if (newmeth->linfo&&newmeth->linfo->ast&&newmeth->fptr!=&jl_trampoline) {
        newmeth->fptr = &jl_trampoline;
    }
    */

    (void)jl_method_cache_insert(mt, type, newmeth);

    if (newmeth->linfo != NULL && newmeth->linfo->sparams == jl_null) {
        // when there are no static parameters, one unspecialized version
        // of a function can be shared among all cached specializations.
        if (method->linfo->unspecialized == NULL) {
            method->linfo->unspecialized = jl_instantiate_method(method, jl_null);
        }
        newmeth->linfo->unspecialized = method->linfo->unspecialized;
    }

    if (newmeth->linfo != NULL && newmeth->linfo->ast != NULL) {
        newmeth->linfo->specTypes = (jl_value_t*)type;
        jl_array_t *spe = method->linfo->specializations;
        if (spe == NULL) {
            spe = jl_alloc_cell_1d(1);
            jl_cellset(spe, 0, newmeth->linfo);
        }
        else {
            jl_cell_1d_push(spe, (jl_value_t*)newmeth->linfo);
        }
        method->linfo->specializations = spe;
        jl_type_infer(newmeth->linfo, type, method->linfo);
    }
    JL_GC_POP();
    return newmeth;
}
static jl_value_t *R_Julia_MD(SEXP Var, const char *VarName)
{
    if ((LENGTH(Var)) == 0)
        return (jl_value_t *) jl_nothing;

    jl_tuple_t *dims = RDims_JuliaTuple(Var);
    switch (TYPEOF(Var)) {
    case LGLSXP: {
        jl_array_t *ret = CreateArray(jl_bool_type, jl_tuple_len(dims), dims);
        JL_GC_PUSH1(&ret);
        char *retData = (char *)jl_array_data(ret);
        for (size_t i = 0; i < jl_array_len(ret); i++)
            retData[i] = LOGICAL(Var)[i];
        jl_set_global(jl_main_module, jl_symbol(VarName), (jl_value_t *)ret);
        JL_GC_POP();
        return (jl_value_t *) ret;
    }
    case INTSXP: {
        jl_array_t *ret = CreateArray(jl_int32_type, jl_tuple_len(dims), dims);
        JL_GC_PUSH1(&ret);
        int *retData = (int *)jl_array_data(ret);
        for (size_t i = 0; i < jl_array_len(ret); i++)
            retData[i] = INTEGER(Var)[i];
        jl_set_global(jl_main_module, jl_symbol(VarName), (jl_value_t *)ret);
        JL_GC_POP();
        return (jl_value_t *) ret;
    }
    case REALSXP: {
        jl_array_t *ret = CreateArray(jl_float64_type, jl_tuple_len(dims), dims);
        JL_GC_PUSH1(&ret);
        double *retData = (double *)jl_array_data(ret);
        for (size_t i = 0; i < jl_array_len(ret); i++)
            retData[i] = REAL(Var)[i];
        jl_set_global(jl_main_module, jl_symbol(VarName), (jl_value_t *)ret);
        JL_GC_POP();
        return (jl_value_t *) ret;
    }
    case STRSXP: {
        jl_array_t *ret;
        if (!IS_ASCII(Var))
            ret = CreateArray(jl_utf8_string_type, jl_tuple_len(dims), dims);
        else
            ret = CreateArray(jl_ascii_string_type, jl_tuple_len(dims), dims);
        JL_GC_PUSH1(&ret);
        jl_value_t **retData = jl_array_data(ret);
        for (size_t i = 0; i < jl_array_len(ret); i++) {
            if (!IS_ASCII(Var))
                retData[i] = jl_cstr_to_string(translateChar0(STRING_ELT(Var, i)));
            else
                retData[i] = jl_cstr_to_string(CHAR(STRING_ELT(Var, i)));
        }
        jl_set_global(jl_main_module, jl_symbol(VarName), (jl_value_t *)ret);
        JL_GC_POP();
        return (jl_value_t *) ret;
    }
    case VECSXP: {
        // recursively convert each list element, binding it as "<VarName>element<i>"
        char eltcmd[eltsize];
        jl_tuple_t *ret = jl_alloc_tuple(length(Var));
        JL_GC_PUSH1(&ret);
        for (int i = 0; i < length(Var); i++) {
            snprintf(eltcmd, eltsize, "%selement%d", VarName, i);
            jl_tupleset(ret, i, R_Julia_MD(VECTOR_ELT(Var, i), eltcmd));
        }
        jl_set_global(jl_main_module, jl_symbol(VarName), (jl_value_t *)ret);
        JL_GC_POP();
        return (jl_value_t *) ret;
    }
    default:
        return (jl_value_t *) jl_nothing;
    }
}