void jl_compute_struct_offsets(jl_struct_type_t *st)
{
    size_t sz = 0, alignm = 0;
    for(size_t i=0; i < st->types->length; i++) {
        jl_value_t *ty = jl_tupleref(st->types, i);
        size_t fsz, al;
        if (jl_is_bits_type(ty)) {
            fsz = jl_bitstype_nbits(ty)/8;
            al = fsz;   // alignment == size for bits types
            st->fields[i].isptr = 0;
        }
        else {
            fsz = sizeof(void*);
            al = fsz;
            st->fields[i].isptr = 1;
        }
        sz = LLT_ALIGN(sz, al);
        if (al > alignm)
            alignm = al;
        st->fields[i].offset = sz;
        st->fields[i].size = fsz;
        sz += fsz;
    }
    st->alignment = alignm;
    st->size = LLT_ALIGN(sz, alignm);
}
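Every version of this routine leans on LLT_ALIGN to round the running byte offset up to the next multiple of a power-of-two alignment. A minimal standalone sketch of that arithmetic, using the macro as it is commonly defined in Julia's support headers (the example struct is illustrative, not from the source above):

#include <stdio.h>
#include <stddef.h>

#define LLT_ALIGN(x, sz) (((x) + (sz) - 1) & ~((sz) - 1))

int main(void)
{
    // lay out struct { int8_t a; int64_t b; } by the scheme above
    size_t sz = 0;
    sz += 1;                                          // field a: offset 0, size 1
    sz = LLT_ALIGN(sz, 8);                            // pad so field b is 8-aligned
    printf("offset of b  = %zu\n", sz);               // prints 8
    sz += 8;
    printf("struct size  = %zu\n", LLT_ALIGN(sz, 8)); // prints 16
    return 0;
}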
void jl_compute_field_offsets(jl_datatype_t *st)
{
    size_t sz = 0, alignm = 1;
    int ptrfree = 1;
    for(size_t i=0; i < jl_datatype_nfields(st); i++) {
        jl_value_t *ty = jl_field_type(st, i);
        size_t fsz, al;
        if (jl_isbits(ty) && jl_is_leaf_type(ty)) {
            fsz = jl_datatype_size(ty);
            al = ((jl_datatype_t*)ty)->alignment;
            st->fields[i].isptr = 0;
        }
        else {
            fsz = sizeof(void*);
            if (fsz > MAX_ALIGN)
                fsz = MAX_ALIGN;
            al = fsz;
            st->fields[i].isptr = 1;
            ptrfree = 0;
        }
        if (al != 0) {
            sz = LLT_ALIGN(sz, al);
            if (al > alignm)
                alignm = al;
        }
        st->fields[i].offset = sz;
        st->fields[i].size = fsz;
        sz += fsz;
    }
    st->alignment = alignm;
    st->size = LLT_ALIGN(sz, alignm);
    st->pointerfree = ptrfree && !st->abstract;
}
void jl_compute_field_offsets(jl_datatype_t *st)
{
    size_t sz = 0, alignm = 0;
    int ptrfree = 1;
    for(size_t i=0; i < jl_tuple_len(st->types); i++) {
        jl_value_t *ty = jl_tupleref(st->types, i);
        size_t fsz, al;
        if (jl_isbits(ty) && (al=((jl_datatype_t*)ty)->alignment)!=0) {
            fsz = jl_datatype_size(ty);
            st->fields[i].isptr = 0;
        }
        else {
            fsz = sizeof(void*);
            al = fsz;
            st->fields[i].isptr = 1;
            ptrfree = 0;
        }
        sz = LLT_ALIGN(sz, al);
        if (al > alignm)
            alignm = al;
        st->fields[i].offset = sz;
        st->fields[i].size = fsz;
        sz += fsz;
    }
    st->alignment = alignm;
    st->size = LLT_ALIGN(sz, alignm);
    st->pointerfree = ptrfree && !st->abstract;
}
void jl_compute_field_offsets(jl_datatype_t *st)
{
    size_t sz = 0, alignm = 1;
    int ptrfree = 1;
    assert(0 <= st->fielddesc_type && st->fielddesc_type <= 2);
    uint64_t max_offset = (((uint64_t)1) << (1 << (3 + st->fielddesc_type))) - 1;
    uint64_t max_size = max_offset >> 1;
    for(size_t i=0; i < jl_datatype_nfields(st); i++) {
        jl_value_t *ty = jl_field_type(st, i);
        size_t fsz, al;
        if (jl_isbits(ty) && jl_is_leaf_type(ty)) {
            fsz = jl_datatype_size(ty);
            // Should never happen
            if (__unlikely(fsz > max_size))
                jl_throw(jl_overflow_exception);
            al = ((jl_datatype_t*)ty)->alignment;
            jl_field_setisptr(st, i, 0);
            if (((jl_datatype_t*)ty)->haspadding)
                st->haspadding = 1;
        }
        else {
            fsz = sizeof(void*);
            if (fsz > MAX_ALIGN)
                fsz = MAX_ALIGN;
            al = fsz;
            jl_field_setisptr(st, i, 1);
            ptrfree = 0;
        }
        if (al != 0) {
            size_t alsz = LLT_ALIGN(sz, al);
            if (sz & (al - 1))
                st->haspadding = 1;
            sz = alsz;
            if (al > alignm)
                alignm = al;
        }
        jl_field_setoffset(st, i, sz);
        jl_field_setsize(st, i, fsz);
        if (__unlikely(max_offset - sz < fsz))
            jl_throw(jl_overflow_exception);
        sz += fsz;
    }
    st->alignment = alignm;
    st->size = LLT_ALIGN(sz, alignm);
    if (st->size > sz)
        st->haspadding = 1;
    st->pointerfree = ptrfree && !st->abstract;
}
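The max_offset computation above is sizing the packed field descriptors: fielddesc_type selects 8-, 16-, or 32-bit descriptor fields, so 1 << (3 + fielddesc_type) is the bit width and max_offset is the largest offset that fits. max_size is max_offset >> 1 because the size field is one bit narrower, sharing its word with the isptr flag. A quick standalone check of the arithmetic:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    for (int fielddesc_type = 0; fielddesc_type <= 2; fielddesc_type++) {
        int bits = 1 << (3 + fielddesc_type);                // 8, 16, 32
        uint64_t max_offset = (((uint64_t)1) << bits) - 1;
        printf("fielddesc_type %d: %2d-bit fields, max_offset = %llu, max_size = %llu\n",
               fielddesc_type, bits,
               (unsigned long long)max_offset,
               (unsigned long long)(max_offset >> 1));
    }
    // prints 255/127, 65535/32767, 4294967295/2147483647
    return 0;
}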
// Note that this function updates len
static jl_value_t *jl_new_bits_internal(jl_value_t *dt, void *data, size_t *len)
{
    assert(jl_is_datatype(dt));
    jl_datatype_t *bt = (jl_datatype_t*)dt;
    size_t nb = jl_datatype_size(bt);
    if (nb == 0)
        return jl_new_struct_uninit(bt);
    *len = LLT_ALIGN(*len, bt->alignment);
    data = (char*)data + (*len);
    *len += nb;
    if (bt == jl_uint8_type)   return jl_box_uint8(*(uint8_t*)data);
    if (bt == jl_int64_type)   return jl_box_int64(*(int64_t*)data);
    if (bt == jl_bool_type)    return (*(int8_t*)data) ? jl_true:jl_false;
    if (bt == jl_int32_type)   return jl_box_int32(*(int32_t*)data);
    if (bt == jl_float64_type) return jl_box_float64(*(double*)data);

    jl_value_t *v = (jl_value_t*)newobj((jl_value_t*)bt, NWORDS(nb));
    switch (nb) {
    case  1: *(int8_t*)   jl_data_ptr(v) = *(int8_t*)data;    break;
    case  2: *(int16_t*)  jl_data_ptr(v) = *(int16_t*)data;   break;
    case  4: *(int32_t*)  jl_data_ptr(v) = *(int32_t*)data;   break;
    case  8: *(int64_t*)  jl_data_ptr(v) = *(int64_t*)data;   break;
    case 16: *(bits128_t*)jl_data_ptr(v) = *(bits128_t*)data; break;
    default: memcpy(jl_data_ptr(v), data, nb);
    }
    return v;
}
static jl_value_t *new_scalar(jl_bits_type_t *bt)
{
    size_t nb = jl_bitstype_nbits(bt)/8;
    jl_value_t *v = (jl_value_t*)allocobj((NWORDS(LLT_ALIGN(nb,sizeof(void*)))+1)*
                                          sizeof(void*));
    v->type = (jl_type_t*)bt;
    return v;
}
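The allocation size here is nb rounded up to whole pointer-sized words, plus one extra word for the type tag that precedes the object's data. A sketch of the computation, assuming the usual definition of NWORDS (the number of pointer-sized words covering a byte count; the definition below is an assumption, not quoted from the source):

#include <stdio.h>
#include <stddef.h>

#define LLT_ALIGN(x, sz) (((x) + (sz) - 1) & ~((sz) - 1))
// assumed definition: pointer-sized words needed to hold sz bytes
#define NWORDS(sz) (((sz) + sizeof(void*) - 1) / sizeof(void*))

int main(void)
{
    for (size_t nb = 1; nb <= 16; nb *= 2) {
        size_t total = (NWORDS(LLT_ALIGN(nb, sizeof(void*))) + 1) * sizeof(void*);
        printf("nb = %2zu -> allocate %2zu bytes (one word is the type tag)\n",
               nb, total); // on 64-bit: 16, 16, 16, 16, 24
    }
    return 0;
}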
void jl_compute_field_offsets(jl_datatype_t *st)
{
    size_t sz = 0, alignm = 1;
    int ptrfree = 1;
    for(size_t i=0; i < jl_datatype_nfields(st); i++) {
        jl_value_t *ty = jl_field_type(st, i);
        size_t fsz, al;
        if (jl_isbits(ty) && jl_is_leaf_type(ty)) {
            fsz = jl_datatype_size(ty);
            if (__unlikely(fsz > JL_FIELD_MAX_SIZE))
                jl_throw(jl_overflow_exception);
            al = ((jl_datatype_t*)ty)->alignment;
            st->fields[i].isptr = 0;
            if (((jl_datatype_t*)ty)->haspadding)
                st->haspadding = 1;
        }
        else {
            fsz = sizeof(void*);
            if (fsz > MAX_ALIGN)
                fsz = MAX_ALIGN;
            al = fsz;
            st->fields[i].isptr = 1;
            ptrfree = 0;
        }
        if (al != 0) {
            size_t alsz = LLT_ALIGN(sz, al);
            if (alsz > sz)
                st->haspadding = 1;
            sz = alsz;
            if (al > alignm)
                alignm = al;
        }
        if (__unlikely(sz > JL_FIELD_MAX_OFFSET))
            jl_throw(jl_overflow_exception);
        st->fields[i].offset = sz;
        st->fields[i].size = fsz;
        sz += fsz;
    }
    st->alignment = alignm;
    st->size = LLT_ALIGN(sz, alignm);
    st->pointerfree = ptrfree && !st->abstract;
}
static jl_value_t *jl_new_bits_internal(jl_value_t *dt, void *data, size_t *len)
{
    if (jl_is_tuple(dt)) {
        jl_tuple_t *tuple = (jl_tuple_t*)dt;
        *len = LLT_ALIGN(*len, jl_new_bits_align(dt));
        size_t i, l = jl_tuple_len(tuple);
        jl_value_t *v = (jl_value_t*)jl_alloc_tuple(l);
        JL_GC_PUSH1(&v);  // JL_GC_PUSH1 roots the slot, so it takes the address of v
        for (i = 0; i < l; i++) {
            jl_tupleset(v, i, jl_new_bits_internal(jl_tupleref(tuple,i),
                                                   (char*)data, len));
        }
        JL_GC_POP();
        return v;
    }

    jl_datatype_t *bt = (jl_datatype_t*)dt;
    size_t nb = jl_datatype_size(bt);
    if (nb == 0)
        return jl_new_struct_uninit(bt);
    *len = LLT_ALIGN(*len, bt->alignment);
    data = (char*)data + (*len);
    *len += nb;
    if (bt == jl_uint8_type)   return jl_box_uint8(*(uint8_t*)data);
    if (bt == jl_int64_type)   return jl_box_int64(*(int64_t*)data);
    if (bt == jl_bool_type)    return (*(int8_t*)data) ? jl_true:jl_false;
    if (bt == jl_int32_type)   return jl_box_int32(*(int32_t*)data);
    if (bt == jl_float64_type) return jl_box_float64(*(double*)data);

    jl_value_t *v = (jl_value_t*)allocobj((NWORDS(LLT_ALIGN(nb,sizeof(void*)))+1)*
                                          sizeof(void*));
    v->type = (jl_value_t*)bt;
    switch (nb) {
    case  1: *(int8_t*)   jl_data_ptr(v) = *(int8_t*)data;    break;
    case  2: *(int16_t*)  jl_data_ptr(v) = *(int16_t*)data;   break;
    case  4: *(int32_t*)  jl_data_ptr(v) = *(int32_t*)data;   break;
    case  8: *(int64_t*)  jl_data_ptr(v) = *(int64_t*)data;   break;
    case 16: *(bits128_t*)jl_data_ptr(v) = *(bits128_t*)data; break;
    default: memcpy(jl_data_ptr(v), data, nb);
    }
    return v;
}
static void *alloc_sigstack(size_t size)
{
    size_t pagesz = jl_getpagesize();
    // Add one guard page to catch stack overflow in the signal handler
    size = LLT_ALIGN(size, pagesz) + pagesz;
    void *stackbuff = mmap(0, size, PROT_READ | PROT_WRITE,
                           MAP_NORESERVE | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (stackbuff == MAP_FAILED)
        jl_errorf("fatal error allocating signal stack: mmap: %s",
                  strerror(errno));
    mprotect(stackbuff, pagesz, PROT_NONE);
    return (void*)((char*)stackbuff + pagesz);
}
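The returned pointer is one page past the start of the mapping, so any code that later releases the stack has to back up over the guard page and redo the size rounding. A hypothetical counterpart (free_sigstack is not in the source above; it only illustrates the bookkeeping):

static void free_sigstack(void *stack, size_t size)
{
    size_t pagesz = jl_getpagesize();
    // same rounding as alloc_sigstack, then step back over the guard page
    size = LLT_ALIGN(size, pagesz) + pagesz;
    munmap((char*)stack - pagesz, size);
}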
// run time version of pointerref intrinsic (warning: i is not rooted)
JL_DLLEXPORT jl_value_t *jl_pointerref(jl_value_t *p, jl_value_t *i)
{
    JL_TYPECHK(pointerref, pointer, p);
    JL_TYPECHK(pointerref, long, i);
    jl_value_t *ety = jl_tparam0(jl_typeof(p));
    if (ety == (jl_value_t*)jl_any_type) {
        jl_value_t **pp = (jl_value_t**)(jl_unbox_long(p) +
                                         (jl_unbox_long(i)-1)*sizeof(void*));
        return *pp;
    }
    else {
        if (!jl_is_datatype(ety))
            jl_error("pointerref: invalid pointer");
        size_t nb = LLT_ALIGN(jl_datatype_size(ety),
                              ((jl_datatype_t*)ety)->layout->alignment);
        char *pp = (char*)jl_unbox_long(p) + (jl_unbox_long(i)-1)*nb;
        return jl_new_bits(ety, pp);
    }
}
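Note that the element stride is the element size rounded up to the element alignment, so 1-based index i lands at p + (i-1)*nb. A worked example with illustrative numbers (an element type of size 12 and alignment 8, values chosen for the example rather than taken from the source):

#include <stdio.h>
#include <stddef.h>

#define LLT_ALIGN(x, sz) (((x) + (sz) - 1) & ~((sz) - 1))

int main(void)
{
    size_t elsz = 12, align = 8;
    size_t nb = LLT_ALIGN(elsz, align);   // stride = 16
    for (long i = 1; i <= 3; i++)
        printf("element %ld at byte offset %zu\n", i, (size_t)(i - 1) * nb);
    return 0;                             // offsets 0, 16, 32
}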
// run time version of pointerset intrinsic
DLLEXPORT void jl_pointerset(jl_value_t *p, jl_value_t *x, jl_value_t *i)
{
    JL_TYPECHK(pointerset, pointer, p);
    JL_TYPECHK(pointerset, long, i);
    jl_value_t *ety = jl_tparam0(jl_typeof(p));
    if (ety == (jl_value_t*)jl_any_type) {
        jl_value_t **pp = (jl_value_t**)(jl_unbox_long(p) +
                                         (jl_unbox_long(i)-1)*sizeof(void*));
        *pp = x;
    }
    else {
        if (!jl_is_datatype(ety))
            jl_error("pointerset: invalid pointer");
        size_t nb = LLT_ALIGN(jl_datatype_size(ety),
                              ((jl_datatype_t*)ety)->alignment);
        char *pp = (char*)jl_unbox_long(p) + (jl_unbox_long(i)-1)*nb;
        if (jl_typeof(x) != ety)
            jl_error("pointerset: type mismatch in assign");
        jl_assign_bits(pp, x);
    }
}
// run time version of pointerset intrinsic (warning: x is not gc-rooted)
JL_DLLEXPORT jl_value_t *jl_pointerset(jl_value_t *p, jl_value_t *x, jl_value_t *i,
                                       jl_value_t *align)
{
    JL_TYPECHK(pointerset, pointer, p);
    JL_TYPECHK(pointerset, long, i);
    JL_TYPECHK(pointerset, long, align);
    jl_value_t *ety = jl_tparam0(jl_typeof(p));
    if (ety == (jl_value_t*)jl_any_type) {
        jl_value_t **pp = (jl_value_t**)(jl_unbox_long(p) +
                                         (jl_unbox_long(i)-1)*sizeof(void*));
        *pp = x;
    }
    else {
        if (!jl_is_datatype(ety))
            jl_error("pointerset: invalid pointer");
        size_t elsz = jl_datatype_size(ety);
        size_t nb = LLT_ALIGN(elsz, jl_datatype_align(ety));
        char *pp = (char*)jl_unbox_long(p) + (jl_unbox_long(i)-1)*nb;
        if (jl_typeof(x) != ety)
            jl_error("pointerset: type mismatch in assign");
        memcpy(pp, x, elsz);
    }
    return p;
}
jl_value_t *jl_new_bits(jl_datatype_t *bt, void *data)
{
    if (bt == jl_uint8_type)        return jl_box_uint8(*(uint8_t*)data);
    else if (bt == jl_int64_type)   return jl_box_int64(*(int64_t*)data);
    else if (bt == jl_bool_type)    return (*(int8_t*)data) ? jl_true:jl_false;
    else if (bt == jl_int32_type)   return jl_box_int32(*(int32_t*)data);
    else if (bt == jl_float64_type) return jl_box_float64(*(double*)data);

    size_t nb = jl_datatype_size(bt);
    jl_value_t *v = (jl_value_t*)allocobj((NWORDS(LLT_ALIGN(nb,sizeof(void*)))+1)*
                                          sizeof(void*));
    v->type = (jl_value_t*)bt;
    switch (nb) {
    case  1: *(int8_t*)   jl_data_ptr(v) = *(int8_t*)data;    break;
    case  2: *(int16_t*)  jl_data_ptr(v) = *(int16_t*)data;   break;
    case  4: *(int32_t*)  jl_data_ptr(v) = *(int32_t*)data;   break;
    case  8: *(int64_t*)  jl_data_ptr(v) = *(int64_t*)data;   break;
    case 16: *(bits128_t*)jl_data_ptr(v) = *(bits128_t*)data; break;
    default: memcpy(jl_data_ptr(v), data, nb);
    }
    return v;
}
void jl_compute_field_offsets(jl_datatype_t *st)
{
    size_t sz = 0, alignm = 1;
    int homogeneous = 1;
    jl_value_t *lastty = NULL;
    uint64_t max_offset = (((uint64_t)1) << 32) - 1;
    uint64_t max_size = max_offset >> 1;

    uint32_t nfields = jl_svec_len(st->types);
    jl_fielddesc32_t *desc = (jl_fielddesc32_t*)alloca(nfields * sizeof(jl_fielddesc32_t));
    int haspadding = 0;
    assert(st->name == jl_tuple_typename ||
           st == jl_sym_type ||
           st == jl_simplevector_type ||
           nfields != 0);

    for (size_t i = 0; i < nfields; i++) {
        jl_value_t *ty = jl_field_type(st, i);
        size_t fsz, al;
        if (jl_isbits(ty) && jl_is_leaf_type(ty) && ((jl_datatype_t*)ty)->layout) {
            fsz = jl_datatype_size(ty);
            // Should never happen
            if (__unlikely(fsz > max_size))
                jl_throw(jl_overflow_exception);
            al = ((jl_datatype_t*)ty)->layout->alignment;
            desc[i].isptr = 0;
            if (((jl_datatype_t*)ty)->layout->haspadding)
                haspadding = 1;
        }
        else {
            fsz = sizeof(void*);
            if (fsz > MAX_ALIGN)
                fsz = MAX_ALIGN;
            al = fsz;
            desc[i].isptr = 1;
        }
        if (al != 0) {
            size_t alsz = LLT_ALIGN(sz, al);
            if (sz & (al - 1))
                haspadding = 1;
            sz = alsz;
            if (al > alignm)
                alignm = al;
        }
        homogeneous &= lastty==NULL || lastty==ty;
        lastty = ty;
        desc[i].offset = sz;
        desc[i].size = fsz;
        if (__unlikely(max_offset - sz < fsz))
            jl_throw(jl_overflow_exception);
        sz += fsz;
    }
    if (homogeneous && lastty!=NULL && jl_is_tuple_type(st)) {
        // Some tuples become LLVM vectors with stronger alignment than what
        // was calculated above.
        unsigned al = jl_special_vector_alignment(nfields, lastty);
        assert(al % alignm == 0);
        if (al)
            alignm = al;
    }
    st->size = LLT_ALIGN(sz, alignm);
    if (st->size > sz)
        haspadding = 1;
    st->layout = jl_get_layout(nfields, alignm, haspadding, desc);
}
void jl_compute_field_offsets(jl_datatype_t *st)
{
    size_t sz = 0, alignm = 1;
    int homogeneous = 1;
    jl_value_t *lastty = NULL;
    uint64_t max_offset = (((uint64_t)1) << 32) - 1;
    uint64_t max_size = max_offset >> 1;

    if (st->name->wrapper) {
        // If layout doesn't depend on type parameters, it's stored in
        // st->name->wrapper and reused by all subtypes.
        jl_datatype_t *w = (jl_datatype_t*)jl_unwrap_unionall(st->name->wrapper);
        if (st != w && // this check allows us to re-compute layout for some types during init
            w->layout) {
            st->layout = w->layout;
            st->size = w->size;
            return;
        }
    }
    if (st->types == NULL)
        return;
    uint32_t nfields = jl_svec_len(st->types);
    if (nfields == 0) {
        if (st == jl_sym_type || st == jl_string_type) {
            // opaque layout - heap-allocated blob
            static const jl_datatype_layout_t opaque_byte_layout = {0, 1, 0, 1, 0};
            st->layout = &opaque_byte_layout;
        }
        else if (st == jl_simplevector_type || st->name == jl_array_typename) {
            static const jl_datatype_layout_t opaque_ptr_layout = {0, sizeof(void*), 0, 1, 0};
            st->layout = &opaque_ptr_layout;
        }
        else {
            // reuse the same layout for all singletons
            static const jl_datatype_layout_t singleton_layout = {0, 1, 0, 0, 0};
            st->layout = &singleton_layout;
        }
        return;
    }
    if (!jl_is_leaf_type((jl_value_t*)st)) {
        // compute layout whenever field types have no free variables
        for (size_t i = 0; i < nfields; i++) {
            if (jl_has_free_typevars(jl_field_type(st, i)))
                return;
        }
    }

    size_t descsz = nfields * sizeof(jl_fielddesc32_t);
    jl_fielddesc32_t *desc;
    if (descsz < jl_page_size)
        desc = (jl_fielddesc32_t*)alloca(descsz);
    else
        desc = (jl_fielddesc32_t*)malloc(descsz);
    int haspadding = 0;
    assert(st->name == jl_tuple_typename ||
           st == jl_sym_type ||
           st == jl_simplevector_type ||
           nfields != 0);

    for (size_t i = 0; i < nfields; i++) {
        jl_value_t *ty = jl_field_type(st, i);
        size_t fsz, al;
        if (jl_isbits(ty) && jl_is_leaf_type(ty) && ((jl_datatype_t*)ty)->layout) {
            fsz = jl_datatype_size(ty);
            // Should never happen
            if (__unlikely(fsz > max_size))
                goto throw_ovf;
            al = jl_datatype_align(ty);
            desc[i].isptr = 0;
            if (((jl_datatype_t*)ty)->layout->haspadding)
                haspadding = 1;
        }
        else {
            fsz = sizeof(void*);
            if (fsz > MAX_ALIGN)
                fsz = MAX_ALIGN;
            al = fsz;
            desc[i].isptr = 1;
        }
        assert(al <= JL_HEAP_ALIGNMENT && (JL_HEAP_ALIGNMENT % al) == 0);
        if (al != 0) {
            size_t alsz = LLT_ALIGN(sz, al);
            if (sz & (al - 1))
                haspadding = 1;
            sz = alsz;
            if (al > alignm)
                alignm = al;
        }
        homogeneous &= lastty==NULL || lastty==ty;
        lastty = ty;
        desc[i].offset = sz;
        desc[i].size = fsz;
        if (__unlikely(max_offset - sz < fsz))
            goto throw_ovf;
        sz += fsz;
    }
    if (homogeneous && lastty!=NULL && jl_is_tuple_type(st)) {
        // Some tuples become LLVM vectors with stronger alignment than what
        // was calculated above.
        unsigned al = jl_special_vector_alignment(nfields, lastty);
        assert(al % alignm == 0);
        // JL_HEAP_ALIGNMENT is the biggest alignment we can guarantee on the heap.
        if (al > JL_HEAP_ALIGNMENT)
            alignm = JL_HEAP_ALIGNMENT;
        else if (al)
            alignm = al;
    }
    st->size = LLT_ALIGN(sz, alignm);
    if (st->size > sz)
        haspadding = 1;
    st->layout = jl_get_layout(nfields, alignm, haspadding, desc);
    if (descsz >= jl_page_size)
        free(desc);
    return;
 throw_ovf:
    if (descsz >= jl_page_size)
        free(desc);
    jl_throw(jl_overflow_exception);
}
void jl_compute_field_offsets(jl_datatype_t *st)
{
    size_t sz = 0, alignm = 1;
    int ptrfree = 1;
    int homogeneous = 1;
    jl_value_t *lastty = NULL;
    assert(0 <= st->fielddesc_type && st->fielddesc_type <= 2);
    uint64_t max_offset = (((uint64_t)1) << (1 << (3 + st->fielddesc_type))) - 1;
    uint64_t max_size = max_offset >> 1;

    for(size_t i=0; i < jl_datatype_nfields(st); i++) {
        jl_value_t *ty = jl_field_type(st, i);
        size_t fsz, al;
        if (jl_isbits(ty) && jl_is_leaf_type(ty)) {
            fsz = jl_datatype_size(ty);
            // Should never happen
            if (__unlikely(fsz > max_size))
                jl_throw(jl_overflow_exception);
            al = ((jl_datatype_t*)ty)->alignment;
            jl_field_setisptr(st, i, 0);
            if (((jl_datatype_t*)ty)->haspadding)
                st->haspadding = 1;
        }
        else {
            fsz = sizeof(void*);
            if (fsz > MAX_ALIGN)
                fsz = MAX_ALIGN;
            al = fsz;
            jl_field_setisptr(st, i, 1);
            ptrfree = 0;
        }
        if (al != 0) {
            size_t alsz = LLT_ALIGN(sz, al);
            if (sz & (al - 1))
                st->haspadding = 1;
            sz = alsz;
            if (al > alignm)
                alignm = al;
        }
        homogeneous &= lastty==NULL || lastty==ty;
        lastty = ty;
        jl_field_setoffset(st, i, sz);
        jl_field_setsize(st, i, fsz);
        if (__unlikely(max_offset - sz < fsz))
            jl_throw(jl_overflow_exception);
        sz += fsz;
    }
    if (homogeneous && lastty!=NULL && jl_is_tuple_type(st)) {
        // Some tuples become LLVM vectors with stronger alignment than what
        // was calculated above.
        unsigned al = jl_special_vector_alignment(jl_datatype_nfields(st), lastty);
        assert(al % alignm == 0);
        if (al)
            alignm = al;
    }
    st->alignment = alignm;
    st->size = LLT_ALIGN(sz, alignm);
    if (st->size > sz)
        st->haspadding = 1;
    st->pointerfree = ptrfree && !st->abstract;
}
void jl_compute_field_offsets(jl_datatype_t *st)
{
    size_t sz = 0, alignm = 1;
    int homogeneous = 1;
    jl_value_t *lastty = NULL;
    uint64_t max_offset = (((uint64_t)1) << 32) - 1;
    uint64_t max_size = max_offset >> 1;

    if (st->name->wrapper) {
        jl_datatype_t *w = (jl_datatype_t*)jl_unwrap_unionall(st->name->wrapper);
        // compute whether this type can be inlined
        // based on whether its definition is self-referential
        if (w->types != NULL) {
            st->isbitstype = st->isconcretetype && !st->mutabl;
            size_t i, nf = jl_field_count(st);
            for (i = 0; i < nf; i++) {
                jl_value_t *fld = jl_field_type(st, i);
                if (st->isbitstype)
                    st->isbitstype = jl_is_datatype(fld) && ((jl_datatype_t*)fld)->isbitstype;
                if (!st->zeroinit)
                    st->zeroinit = (jl_is_datatype(fld) && ((jl_datatype_t*)fld)->isinlinealloc)
                                   ? ((jl_datatype_t*)fld)->zeroinit : 1;
            }
            if (st->isbitstype) {
                st->isinlinealloc = 1;
                size_t i, nf = jl_field_count(w);
                for (i = 0; i < nf; i++) {
                    jl_value_t *fld = jl_field_type(w, i);
                    if (references_name(fld, w->name)) {
                        st->isinlinealloc = 0;
                        st->isbitstype = 0;
                        st->zeroinit = 1;
                        break;
                    }
                }
            }
        }
        // If layout doesn't depend on type parameters, it's stored in
        // st->name->wrapper and reused by all subtypes.
        if (st != w && // this check allows us to re-compute layout for some types during init
            w->layout) {
            st->layout = w->layout;
            st->size = w->size;
            jl_allocate_singleton_instance(st);
            return;
        }
    }
    if (st->types == NULL ||
        (jl_is_namedtuple_type(st) && !jl_is_concrete_type((jl_value_t*)st)))
        return;
    uint32_t nfields = jl_svec_len(st->types);
    if (nfields == 0) {
        if (st == jl_sym_type || st == jl_string_type) {
            // opaque layout - heap-allocated blob
            static const jl_datatype_layout_t opaque_byte_layout = {0, 1, 0, 1, 0};
            st->layout = &opaque_byte_layout;
        }
        else if (st == jl_simplevector_type || st->name == jl_array_typename) {
            static const jl_datatype_layout_t opaque_ptr_layout = {0, sizeof(void*), 0, 1, 0};
            st->layout = &opaque_ptr_layout;
        }
        else {
            // reuse the same layout for all singletons
            static const jl_datatype_layout_t singleton_layout = {0, 1, 0, 0, 0};
            st->layout = &singleton_layout;
            jl_allocate_singleton_instance(st);
        }
        return;
    }
    if (!jl_is_concrete_type((jl_value_t*)st)) {
        // compute layout whenever field types have no free variables
        for (size_t i = 0; i < nfields; i++) {
            if (jl_has_free_typevars(jl_field_type(st, i)))
                return;
        }
    }

    size_t descsz = nfields * sizeof(jl_fielddesc32_t);
    jl_fielddesc32_t *desc;
    if (descsz < jl_page_size)
        desc = (jl_fielddesc32_t*)alloca(descsz);
    else
        desc = (jl_fielddesc32_t*)malloc(descsz);
    int haspadding = 0;
    assert(st->name == jl_tuple_typename ||
           st == jl_sym_type ||
           st == jl_simplevector_type ||
           nfields != 0);

    for (size_t i = 0; i < nfields; i++) {
        jl_value_t *ty = jl_field_type(st, i);
        size_t fsz = 0, al = 0;
        if (jl_islayout_inline(ty, &fsz, &al)) {
            if (__unlikely(fsz > max_size)) // Should never happen
                goto throw_ovf;
            desc[i].isptr = 0;
            if (jl_is_uniontype(ty)) {
                haspadding = 1;
                fsz += 1; // selector byte
            }
            else { // isbits struct
                if (((jl_datatype_t*)ty)->layout->haspadding)
                    haspadding = 1;
            }
        }
        else {
            fsz = sizeof(void*);
            if (fsz > MAX_ALIGN)
                fsz = MAX_ALIGN;
            al = fsz;
            desc[i].isptr = 1;
        }
        assert(al <= JL_HEAP_ALIGNMENT && (JL_HEAP_ALIGNMENT % al) == 0);
        if (al != 0) {
            size_t alsz = LLT_ALIGN(sz, al);
            if (sz & (al - 1))
                haspadding = 1;
            sz = alsz;
            if (al > alignm)
                alignm = al;
        }
        homogeneous &= lastty==NULL || lastty==ty;
        lastty = ty;
        desc[i].offset = sz;
        desc[i].size = fsz;
        if (__unlikely(max_offset - sz < fsz))
            goto throw_ovf;
        sz += fsz;
    }
    if (homogeneous && lastty != NULL && jl_is_tuple_type(st)) {
        // Some tuples become LLVM vectors with stronger alignment than what
        // was calculated above.
        unsigned al = jl_special_vector_alignment(nfields, lastty);
        assert(al % alignm == 0);
        // JL_HEAP_ALIGNMENT is the biggest alignment we can guarantee on the heap.
        if (al > JL_HEAP_ALIGNMENT)
            alignm = JL_HEAP_ALIGNMENT;
        else if (al)
            alignm = al;
    }
    st->size = LLT_ALIGN(sz, alignm);
    if (st->size > sz)
        haspadding = 1;
    st->layout = jl_get_layout(nfields, alignm, haspadding, desc);
    if (descsz >= jl_page_size)
        free(desc);
    jl_allocate_singleton_instance(st);
    return;
 throw_ovf:
    if (descsz >= jl_page_size)
        free(desc);
    jl_errorf("type %s has field offset %d that exceeds the page size",
              jl_symbol_name(st->name->name), descsz);
}