/* This works out an allocation strategy for the object. It takes care of
 * "inlining" storage of attributes that are natively typed, as well as
 * noting unbox targets.
 *
 * NOTE(review): only the function header and the empty-attribute fast path
 * are visible in this chunk; the non-empty case and the function's closing
 * brace lie outside this view. */
static void compute_allocation_strategy(MVMThreadContext *tc, MVMObject *repr_info, MVMCPPStructREPRData *repr_data) {
    /* Compute index mapping table and get flat list of attributes. */
    MVMObject *flat_list = index_mapping_and_flat_list(tc, repr_info, repr_data);

    /* If we have no attributes in the index mapping, then just the header:
     * a 1-byte dummy body with pointer alignment. */
    if (repr_data->name_to_index_mapping[0].class_key == NULL) {
        repr_data->struct_size = 1; /* avoid 0-byte malloc */
        repr_data->struct_align = ALIGNOF(void *);
    }
/* This works out an allocation strategy for the object. It takes care of
 * "inlining" storage of attributes that are natively typed, as well as
 * noting unbox targets. */
static void compute_allocation_strategy(PARROT_INTERP, PMC *repr_info, CStructREPRData *repr_data) {
    STRING *type_str = Parrot_str_new_constant(interp, "type");
    PMC *flat_list;

    /*
     * We have to block GC mark here, because "repr" is associated with some
     * PMC which is not accessible in this function, and we would have to
     * write-barrier that PMC because we poke inside its guts directly. There
     * is a WB in the caller function, but it can be triggered too late if
     * any allocation below causes a GC run.
     *
     * This is a kind of minor evil until a better solution is found.
     */
    Parrot_block_GC_mark(interp);

    /* Compute index mapping table and get flat list of attributes. */
    flat_list = index_mapping_and_flat_list(interp, repr_info, repr_data);

    /* If we have no attributes in the index mapping, then just the header. */
    if (repr_data->name_to_index_mapping[0].class_key == NULL) {
        repr_data->struct_size = 1; /* avoid 0-byte malloc */
    }
    /* Otherwise, we need to compute the allocation strategy. */
    else {
        /* We track the size of the struct, which is what we'll want offsets
         * into. */
        INTVAL cur_size = 0;

        /* Get number of attributes and set up various counters. */
        INTVAL num_attrs = VTABLE_elements(interp, flat_list);
        INTVAL info_alloc = num_attrs == 0 ? 1 : num_attrs; /* never malloc 0 bytes */
        INTVAL cur_obj_attr = 0;
        INTVAL cur_str_attr = 0; /* NOTE(review): declared but never used below */
        INTVAL cur_init_slot = 0;
        INTVAL i;

        /* Allocate location/offset arrays and GC mark info arrays. */
        repr_data->num_attributes = num_attrs;
        repr_data->attribute_locations = (INTVAL *) mem_sys_allocate(info_alloc * sizeof(INTVAL));
        repr_data->struct_offsets = (INTVAL *) mem_sys_allocate(info_alloc * sizeof(INTVAL));
        repr_data->flattened_stables = (STable **) mem_sys_allocate_zeroed(info_alloc * sizeof(PMC *));
        repr_data->member_types = (PMC **) mem_sys_allocate_zeroed(info_alloc * sizeof(PMC *));

        /* Go over the attributes and arrange their allocation. */
        for (i = 0; i < num_attrs; i++) {
            /* Fetch its type; see if it's some kind of unboxed type. */
            PMC *attr = VTABLE_get_pmc_keyed_int(interp, flat_list, i);
            PMC *type = VTABLE_get_pmc_keyed_str(interp, attr, type_str);
            INTVAL type_id = REPR(type)->ID;
            /* Default: a pointer-sized, pointer-aligned member. */
            INTVAL bits = sizeof(void *) * 8;
            INTVAL align = ALIGNOF1(void *);
            if (!PMC_IS_NULL(type)) {
                /* See if it's a type that we know how to handle in a C struct. */
                storage_spec spec = REPR(type)->get_storage_spec(interp, STABLE(type));
                if (spec.inlineable == STORAGE_SPEC_INLINED &&
                        (spec.boxed_primitive == STORAGE_SPEC_BP_INT ||
                         spec.boxed_primitive == STORAGE_SPEC_BP_NUM)) {
                    /* It's a boxed int or num; pretty easy. It'll just live in
                     * the body of the struct. Instead of masking in i here
                     * (which would be the parallel to how we handle boxed
                     * types) we repurpose it to store the bit-width of the
                     * type, so that get_attribute_ref can find it later. */
                    bits = spec.bits;
                    align = spec.align;
                    if (bits % 8) {
                        Parrot_ex_throw_from_c_args(interp, NULL, EXCEPTION_INVALID_OPERATION,
                            "CStruct only supports native types that are a multiple of 8 bits wide (was passed: %ld)", bits);
                    }
                    repr_data->attribute_locations[i] = (bits << CSTRUCT_ATTR_SHIFT) | CSTRUCT_ATTR_IN_STRUCT;
                    repr_data->flattened_stables[i] = STABLE(type);
                    /* Types with custom initialization get an entry in the
                     * initialize_slots list (lazily allocated with one extra
                     * element for the -1 terminator set at the end). */
                    if (REPR(type)->initialize) {
                        if (!repr_data->initialize_slots)
                            repr_data->initialize_slots = (INTVAL *) mem_sys_allocate_zeroed((info_alloc + 1) * sizeof(INTVAL));
                        repr_data->initialize_slots[cur_init_slot] = i;
                        cur_init_slot++;
                    }
                }
                else if(spec.can_box & STORAGE_SPEC_CAN_BOX_STR) {
                    /* It's a string of some kind; stored as a child object
                     * reference, with the child-object index packed into the
                     * location word. */
                    repr_data->num_child_objs++;
                    repr_data->attribute_locations[i] = (cur_obj_attr++ << CSTRUCT_ATTR_SHIFT) | CSTRUCT_ATTR_STRING;
                    repr_data->member_types[i] = type;
                }
                else if(type_id == get_ca_repr_id()) {
                    /* It's a CArray of some kind. */
                    repr_data->num_child_objs++;
                    repr_data->attribute_locations[i] = (cur_obj_attr++ << CSTRUCT_ATTR_SHIFT) | CSTRUCT_ATTR_CARRAY;
                    repr_data->member_types[i] = type;
                }
                else if(type_id == get_cs_repr_id()) {
                    /* It's a CStruct. */
                    repr_data->num_child_objs++;
                    repr_data->attribute_locations[i] = (cur_obj_attr++ << CSTRUCT_ATTR_SHIFT) | CSTRUCT_ATTR_CSTRUCT;
                    repr_data->member_types[i] = type;
                }
                else if(type_id == get_cp_repr_id()) {
                    /* It's a CPointer. */
                    repr_data->num_child_objs++;
                    repr_data->attribute_locations[i] = (cur_obj_attr++ << CSTRUCT_ATTR_SHIFT) | CSTRUCT_ATTR_CPTR;
                    repr_data->member_types[i] = type;
                }
                else {
                    Parrot_ex_throw_from_c_args(interp, NULL, EXCEPTION_INVALID_OPERATION,
                        "CStruct representation only implements native int and float members so far");
                }
            }
            else {
                Parrot_ex_throw_from_c_args(interp, NULL, EXCEPTION_INVALID_OPERATION,
                    "CStruct representation requires the types of all attributes to be specified");
            }

            /* Do allocation. C structures need careful alignment: if cur_size
             * is not aligned to align bytes (cur_size % align), make sure it
             * is before we place the next element. */
            if (cur_size % align) {
                cur_size += align - cur_size % align;
            }
            repr_data->struct_offsets[i] = cur_size;
            cur_size += bits / 8;
        }

        /* Finally, put the computed allocation size in place and terminate
         * the initialize slot list with its -1 sentinel, if allocated.
         * NOTE(review): no trailing padding rounds struct_size up to the
         * struct's own alignment here — confirm against the target C ABI. */
        repr_data->struct_size = cur_size;
        if (repr_data->initialize_slots)
            repr_data->initialize_slots[cur_init_slot] = -1;
    }
    Parrot_unblock_GC_mark(interp);
}
/* This works out an allocation strategy for the object. It takes care of
 * "inlining" storage of attributes that are natively typed, as well as
 * noting unbox targets. */
static void compute_allocation_strategy(PARROT_INTERP, PMC *WHAT, P6opaqueREPRData *repr_data) {
    STRING *type_str = Parrot_str_new_constant(interp, "type");
    STRING *box_target_str = Parrot_str_new_constant(interp, "box_target");
    STRING *avcont_str = Parrot_str_new_constant(interp, "auto_viv_container");
    PMC *flat_list;

    /*
     * We have to block GC mark here, because "repr" is associated with some
     * PMC which is not accessible in this function, and we would have to
     * write-barrier that PMC because we poke inside its guts directly. There
     * is a WB in the caller function, but it can be triggered too late if
     * any allocation below causes a GC run.
     *
     * This is a kind of minor evil until a better solution is found.
     */
    Parrot_block_GC_mark(interp);

    /* Compute index mapping table and get flat list of attributes. */
    flat_list = index_mapping_and_flat_list(interp, WHAT, repr_data);

    /* If we have no attributes in the index mapping, then just the header. */
    if (repr_data->name_to_index_mapping[0].class_key == NULL) {
        repr_data->allocation_size = sizeof(P6opaqueInstance);
    }
    /* Otherwise, we need to compute the allocation strategy. */
    else {
        /* We track the size of the body part, since that's what we want
         * offsets into. */
        INTVAL cur_size = 0;

        /* Get number of attributes and set up various counters. */
        INTVAL num_attrs = VTABLE_elements(interp, flat_list);
        INTVAL info_alloc = num_attrs == 0 ? 1 : num_attrs; /* never malloc 0 bytes */
        INTVAL cur_pmc_attr = 0;
        INTVAL cur_init_slot = 0;
        INTVAL cur_mark_slot = 0;
        INTVAL cur_cleanup_slot = 0;
        INTVAL cur_unbox_slot = 0;
        INTVAL i;

        /* Allocate offset array and GC mark info arrays. Unbox slots start
         * at -1, meaning "no box target found yet". */
        repr_data->num_attributes = num_attrs;
        repr_data->attribute_offsets = (INTVAL *) mem_sys_allocate(info_alloc * sizeof(INTVAL));
        repr_data->flattened_stables = (STable **) mem_sys_allocate_zeroed(info_alloc * sizeof(PMC *));
        repr_data->unbox_int_slot = -1;
        repr_data->unbox_num_slot = -1;
        repr_data->unbox_str_slot = -1;

        /* Go over the attributes and arrange their allocation. */
        for (i = 0; i < num_attrs; i++) {
            PMC *attr = VTABLE_get_pmc_keyed_int(interp, flat_list, i);

            /* Fetch its type and box target flag, if available. */
            PMC *type = accessor_call(interp, attr, type_str);
            PMC *box_target = accessor_call(interp, attr, box_target_str);
            PMC *av_cont = accessor_call(interp, attr, avcont_str);

            /* Work out what unboxed type it is, if any. Default to boxed:
             * a PMC-pointer-sized reference slot. */
            INTVAL unboxed_type = STORAGE_SPEC_BP_NONE;
            INTVAL bits = sizeof(PMC *) * 8;
            if (!PMC_IS_NULL(type)) {
                /* Get the storage spec of the type and see what it wants. */
                storage_spec spec = REPR(type)->get_storage_spec(interp, STABLE(type));
                if (spec.inlineable == STORAGE_SPEC_INLINED) {
                    /* Yes, it's something we'll flatten. */
                    unboxed_type = spec.boxed_primitive;
                    bits = spec.bits;
                    repr_data->flattened_stables[i] = STABLE(type);

                    /* Does it need special initialization? The slot lists
                     * below are lazily allocated with one extra element for
                     * the -1 terminator set at the end. */
                    if (REPR(type)->initialize) {
                        if (!repr_data->initialize_slots)
                            repr_data->initialize_slots = (INTVAL *) mem_sys_allocate_zeroed((info_alloc + 1) * sizeof(INTVAL));
                        repr_data->initialize_slots[cur_init_slot] = i;
                        cur_init_slot++;
                    }

                    /* Does it have special GC needs? */
                    if (REPR(type)->gc_mark) {
                        if (!repr_data->gc_mark_slots)
                            repr_data->gc_mark_slots = (INTVAL *) mem_sys_allocate_zeroed((info_alloc + 1) * sizeof(INTVAL));
                        repr_data->gc_mark_slots[cur_mark_slot] = i;
                        cur_mark_slot++;
                    }
                    if (REPR(type)->gc_cleanup) {
                        if (!repr_data->gc_cleanup_slots)
                            repr_data->gc_cleanup_slots = (INTVAL *) mem_sys_allocate_zeroed((info_alloc + 1) * sizeof(INTVAL));
                        repr_data->gc_cleanup_slots[cur_cleanup_slot] = i;
                        cur_cleanup_slot++;
                    }

                    /* Is it a target for box/unbox operations? */
                    if (!PMC_IS_NULL(box_target) && VTABLE_get_bool(interp, box_target)) {
                        /* If it boxes a primitive, note that; at most one box
                         * target per primitive kind is allowed. */
                        switch (unboxed_type) {
                            case STORAGE_SPEC_BP_INT:
                                if (repr_data->unbox_int_slot >= 0)
                                    Parrot_ex_throw_from_c_args(interp, NULL, EXCEPTION_INVALID_OPERATION,
                                        "Duplicate box_target for native int");
                                repr_data->unbox_int_slot = i;
                                break;
                            case STORAGE_SPEC_BP_NUM:
                                if (repr_data->unbox_num_slot >= 0)
                                    Parrot_ex_throw_from_c_args(interp, NULL, EXCEPTION_INVALID_OPERATION,
                                        "Duplicate box_target for native num");
                                repr_data->unbox_num_slot = i;
                                break;
                            case STORAGE_SPEC_BP_STR:
                                if (repr_data->unbox_str_slot >= 0)
                                    Parrot_ex_throw_from_c_args(interp, NULL, EXCEPTION_INVALID_OPERATION,
                                        "Duplicate box_target for native str");
                                repr_data->unbox_str_slot = i;
                                break;
                            default:
                                /* nothing, just suppress 'missing default' warning */
                                break;
                        }

                        /* Also list in the by-repr unbox list. */
                        if (repr_data->unbox_slots == NULL)
                            repr_data->unbox_slots = (P6opaqueBoxedTypeMap *) mem_sys_allocate_zeroed(info_alloc * sizeof(P6opaqueBoxedTypeMap));
                        repr_data->unbox_slots[cur_unbox_slot].repr_id = REPR(type)->ID;
                        repr_data->unbox_slots[cur_unbox_slot].slot = i;
                        cur_unbox_slot++;
                    }
                }
            }

            /* Handle PMC attributes, which need marking and may have
             * auto-viv needs. The mark offset recorded is the body-relative
             * cur_size that is about to be assigned below. */
            if (unboxed_type == STORAGE_SPEC_BP_NONE) {
                if (!repr_data->gc_pmc_mark_offsets)
                    repr_data->gc_pmc_mark_offsets = (INTVAL *) mem_sys_allocate_zeroed(info_alloc * sizeof(INTVAL));
                repr_data->gc_pmc_mark_offsets[cur_pmc_attr] = cur_size;
                cur_pmc_attr++;
                if (!PMC_IS_NULL(av_cont)) {
                    if (!repr_data->auto_viv_values)
                        repr_data->auto_viv_values = (PMC **) mem_sys_allocate_zeroed(info_alloc * sizeof(PMC *));
                    repr_data->auto_viv_values[i] = av_cont;
                }
            }

            /* Do allocation. */
            /* XXX TODO Alignment! Important when we get int1, int8, etc. */
            repr_data->attribute_offsets[i] = cur_size;
            cur_size += bits / 8;
        }

        /* Finally, put computed allocation size in place; it's body size plus
         * header size. Also number of markables and the -1 sentinels on any
         * lazily allocated slot lists. */
        repr_data->allocation_size = cur_size + sizeof(P6opaqueInstance);
        repr_data->gc_pmc_mark_offsets_count = cur_pmc_attr;
        if (repr_data->initialize_slots)
            repr_data->initialize_slots[cur_init_slot] = -1;
        if (repr_data->gc_mark_slots)
            repr_data->gc_mark_slots[cur_mark_slot] = -1;
        if (repr_data->gc_cleanup_slots)
            repr_data->gc_cleanup_slots[cur_cleanup_slot] = -1;
    }
    Parrot_unblock_GC_mark(interp);
}
/* This works out an allocation strategy for the object. It takes care of
 * "inlining" storage of attributes that are natively typed, as well as
 * noting unbox targets. */
static void compute_allocation_strategy(MVMThreadContext *tc, MVMObject *repr_info, MVMCStructREPRData *repr_data) {
    /* Compute index mapping table and get flat list of attributes. */
    MVMObject *flat_list = index_mapping_and_flat_list(tc, repr_info, repr_data);

    /* If we have no attributes in the index mapping, then just the header. */
    if (repr_data->name_to_index_mapping[0].class_key == NULL) {
        repr_data->struct_size = 1; /* avoid 0-byte malloc */
    }
    /* Otherwise, we need to compute the allocation strategy. */
    else {
        /* We track the size of the struct, which is what we'll want offsets
         * into. */
        MVMint32 cur_size = 0;

        /* The structure itself will be the multiple of its biggest element
         * in size. So we keep track of that biggest element. */
        MVMint32 multiple_of = 1;

        /* Get number of attributes and set up various counters. */
        MVMint32 num_attrs = MVM_repr_elems(tc, flat_list);
        MVMint32 info_alloc = num_attrs == 0 ? 1 : num_attrs; /* never malloc 0 bytes */
        MVMint32 cur_obj_attr = 0;
        MVMint32 cur_init_slot = 0;
        MVMint32 i;

        /* Allocate location/offset arrays and GC mark info arrays. */
        repr_data->num_attributes = num_attrs;
        repr_data->attribute_locations = (MVMint32 *) MVM_malloc(info_alloc * sizeof(MVMint32));
        repr_data->struct_offsets = (MVMint32 *) MVM_malloc(info_alloc * sizeof(MVMint32));
        repr_data->flattened_stables = (MVMSTable **) MVM_calloc(info_alloc, sizeof(MVMObject *));
        repr_data->member_types = (MVMObject **) MVM_calloc(info_alloc, sizeof(MVMObject *));

        /* Go over the attributes and arrange their allocation. */
        for (i = 0; i < num_attrs; i++) {
            /* Fetch its type; see if it's some kind of unboxed type. */
            MVMObject *attr = MVM_repr_at_pos_o(tc, flat_list, i);
            MVMObject *type = MVM_repr_at_key_o(tc, attr, tc->instance->str_consts.type);
            MVMObject *inlined_val = MVM_repr_at_key_o(tc, attr, tc->instance->str_consts.inlined);
            MVMint64 inlined = !MVM_is_null(tc, inlined_val) && MVM_repr_get_int(tc, inlined_val);
            /* Default: a pointer-sized, pointer-aligned member. */
            MVMint32 bits = sizeof(void *) * 8;
            MVMint32 align = ALIGNOF(void *);
            if (!MVM_is_null(tc, type)) {
                /* See if it's a type that we know how to handle in a C struct. */
                const MVMStorageSpec *spec = REPR(type)->get_storage_spec(tc, STABLE(type));
                MVMint32 type_id = REPR(type)->ID;
                if (spec->inlineable == MVM_STORAGE_SPEC_INLINED &&
                        (spec->boxed_primitive == MVM_STORAGE_SPEC_BP_INT ||
                         spec->boxed_primitive == MVM_STORAGE_SPEC_BP_NUM)) {
                    /* It's a boxed int or num; pretty easy. It'll just live in
                     * the body of the struct. Instead of masking in i here
                     * (which would be the parallel to how we handle boxed
                     * types) we repurpose it to store the bit-width of the
                     * type, so that get_attribute_ref can find it later. */
                    bits = spec->bits;
                    align = spec->align;
                    repr_data->attribute_locations[i] = (bits << MVM_CSTRUCT_ATTR_SHIFT) | MVM_CSTRUCT_ATTR_IN_STRUCT;
                    repr_data->flattened_stables[i] = STABLE(type);
                    /* Types with custom initialization get an entry in the
                     * initialize_slots list (lazily allocated with one extra
                     * element for the -1 terminator set at the end). */
                    if (REPR(type)->initialize) {
                        if (!repr_data->initialize_slots)
                            repr_data->initialize_slots = (MVMint32 *) MVM_calloc(info_alloc + 1, sizeof(MVMint32));
                        repr_data->initialize_slots[cur_init_slot] = i;
                        cur_init_slot++;
                    }
                }
                else if (spec->can_box & MVM_STORAGE_SPEC_CAN_BOX_STR) {
                    /* It's a string of some kind; stored as a child object
                     * reference, with the child-object index packed into the
                     * location word. */
                    repr_data->num_child_objs++;
                    repr_data->attribute_locations[i] = (cur_obj_attr++ << MVM_CSTRUCT_ATTR_SHIFT) | MVM_CSTRUCT_ATTR_STRING;
                    repr_data->member_types[i] = type;
                    repr_data->flattened_stables[i] = STABLE(type);
                    if (REPR(type)->initialize) {
                        if (!repr_data->initialize_slots)
                            repr_data->initialize_slots = (MVMint32 *) MVM_calloc(info_alloc + 1, sizeof(MVMint32));
                        repr_data->initialize_slots[cur_init_slot] = i;
                        cur_init_slot++;
                    }
                }
                else if (type_id == MVM_REPR_ID_MVMCArray) {
                    /* It's a CArray of some kind. */
                    repr_data->num_child_objs++;
                    repr_data->attribute_locations[i] = (cur_obj_attr++ << MVM_CSTRUCT_ATTR_SHIFT) | MVM_CSTRUCT_ATTR_CARRAY;
                    repr_data->member_types[i] = type;
                }
                else if (type_id == MVM_REPR_ID_MVMCStruct) {
                    /* It's a CStruct. If marked inlined, its whole body is
                     * embedded here rather than stored as a pointer. */
                    repr_data->num_child_objs++;
                    repr_data->attribute_locations[i] = (cur_obj_attr++ << MVM_CSTRUCT_ATTR_SHIFT) | MVM_CSTRUCT_ATTR_CSTRUCT;
                    repr_data->member_types[i] = type;
                    if (inlined) {
                        MVMCStructREPRData *cstruct_repr_data = (MVMCStructREPRData *)STABLE(type)->REPR_data;
                        bits = cstruct_repr_data->struct_size * 8;
                        align = cstruct_repr_data->struct_size;
                        repr_data->attribute_locations[i] |= MVM_CSTRUCT_ATTR_INLINED;
                    }
                }
                else if (type_id == MVM_REPR_ID_MVMCPPStruct) {
                    /* It's a CPPStruct; same inlining treatment as CStruct. */
                    repr_data->num_child_objs++;
                    repr_data->attribute_locations[i] = (cur_obj_attr++ << MVM_CSTRUCT_ATTR_SHIFT) | MVM_CSTRUCT_ATTR_CPPSTRUCT;
                    repr_data->member_types[i] = type;
                    if (inlined) {
                        MVMCPPStructREPRData *cppstruct_repr_data = (MVMCPPStructREPRData *)STABLE(type)->REPR_data;
                        bits = cppstruct_repr_data->struct_size * 8;
                        align = cppstruct_repr_data->struct_size;
                        repr_data->attribute_locations[i] |= MVM_CSTRUCT_ATTR_INLINED;
                    }
                }
                else if (type_id == MVM_REPR_ID_MVMCUnion) {
                    /* It's a CUnion; same inlining treatment as CStruct. */
                    repr_data->num_child_objs++;
                    repr_data->attribute_locations[i] = (cur_obj_attr++ << MVM_CSTRUCT_ATTR_SHIFT) | MVM_CSTRUCT_ATTR_CUNION;
                    repr_data->member_types[i] = type;
                    if (inlined) {
                        MVMCUnionREPRData *cunion_repr_data = (MVMCUnionREPRData *)STABLE(type)->REPR_data;
                        bits = cunion_repr_data->struct_size * 8;
                        align = cunion_repr_data->struct_size;
                        repr_data->attribute_locations[i] |= MVM_CSTRUCT_ATTR_INLINED;
                    }
                }
                else if (type_id == MVM_REPR_ID_MVMCPointer) {
                    /* It's a CPointer. */
                    repr_data->num_child_objs++;
                    repr_data->attribute_locations[i] = (cur_obj_attr++ << MVM_CSTRUCT_ATTR_SHIFT) | MVM_CSTRUCT_ATTR_CPTR;
                    repr_data->member_types[i] = type;
                }
                else {
                    MVM_exception_throw_adhoc(tc,
                        "CStruct representation only handles int, num, CArray, CPointer, CStruct, CPPStruct and CUnion");
                }
            }
            else {
                MVM_exception_throw_adhoc(tc,
                    "CStruct representation requires the types of all attributes to be specified");
            }

            if (bits % 8) {
                MVM_exception_throw_adhoc(tc,
                    "CStruct only supports native types that are a multiple of 8 bits wide (was passed: %"PRId32")", bits);
            }

            /* Do allocation. */
            /* C structure needs careful alignment. If cur_size is not aligned
             * to align bytes (cur_size % align), make sure it is before we
             * add the next element. */
            if (cur_size % align) {
                cur_size += align - cur_size % align;
            }
            repr_data->struct_offsets[i] = cur_size;
            cur_size += bits / 8;
            if (bits / 8 > multiple_of)
                multiple_of = bits / 8;
        }

        /* Finally, put the computed allocation size in place, rounding the
         * body size up to a multiple of the largest member (capped at
         * pointer size). Use exact integer arithmetic for the round-up
         * rather than floating-point ceil(), which is inexact for large
         * sizes and needlessly pulls in libm. The cast avoids a
         * signed/unsigned comparison between MVMint32 and size_t. */
        if (multiple_of > (MVMint32)sizeof(void *))
            multiple_of = (MVMint32)sizeof(void *);
        repr_data->struct_size = (cur_size + multiple_of - 1) / multiple_of * multiple_of;
        if (repr_data->initialize_slots)
            repr_data->initialize_slots[cur_init_slot] = -1;
    }
}