/* Return the size of @p type in bytes.
 * Unions/structs are laid out on demand before their cached size is read.
 * Function types report size 0 (they are only address-constant).
 * NOTE(review): no TYPE_VOID case here -- presumably void reaches the
 * panic(); confirm against the enum definition. */
unsigned get_type_size(type_t *type)
{
	switch (type->kind) {
	case TYPE_ERROR:
		/* An error type already produced a diagnostic; any size will do. */
		return 0;
	case TYPE_ATOMIC:
	case TYPE_IMAGINARY:
	case TYPE_ENUM:
		return get_atomic_type_size(type->atomic.akind);
	case TYPE_COMPLEX:
		/* A complex value is a (real, imaginary) pair of the base type. */
		return get_atomic_type_size(type->atomic.akind) * 2;
	case TYPE_COMPOUND_UNION:
		/* Make sure size/alignment have been computed. */
		layout_union_type(&type->compound);
		return type->compound.compound->size;
	case TYPE_COMPOUND_STRUCT:
		layout_struct_type(&type->compound);
		return type->compound.compound->size;
	case TYPE_FUNCTION:
		return 0; /* non-const (but "address-const") */
	case TYPE_REFERENCE:
	case TYPE_POINTER:
		return pointer_properties.size;
	case TYPE_ARRAY: {
		/* TODO: correct if element_type is aligned? */
		il_size_t element_size = get_type_size(type->array.element_type);
		return type->array.size * element_size;
	}
	case TYPE_TYPEDEF:
		/* Delegate to the underlying type of the typedef. */
		return get_type_size(type->typedeft.typedefe->type);
	case TYPE_TYPEOF:
		return get_type_size(type->typeoft.typeof_type);
	}
	panic("invalid type in get_type_size");
}
/* Return the size of @p type in bytes (const-correct variant).
 * Compound types must already be laid out; their cached size is read
 * directly. sizeof() of functions and void is 1, matching the GCC
 * extension that permits arithmetic on such pointers. */
unsigned get_type_size(type_t const *const type)
{
	switch (type->kind) {
	case TYPE_ERROR:
		/* An error type already produced a diagnostic; any size will do. */
		return 0;
	case TYPE_ATOMIC:
	case TYPE_IMAGINARY:
	case TYPE_ENUM:
		return get_atomic_type_size(type->atomic.akind);
	case TYPE_COMPLEX:
		/* A complex value is a (real, imaginary) pair of the base type. */
		return get_atomic_type_size(type->atomic.akind) * 2;
	case TYPE_COMPOUND_STRUCT:
	case TYPE_COMPOUND_UNION:
		return type->compound.compound->size;
	case TYPE_FUNCTION:
	case TYPE_VOID:
		return 1; /* GCC extension. */
	case TYPE_REFERENCE:
	case TYPE_POINTER:
		return pointer_properties.size;
	case TYPE_ARRAY: {
		/* TODO: correct if element_type is aligned? */
		il_size_t element_size = get_type_size(type->array.element_type);
		return type->array.size * element_size;
	}
	case TYPE_TYPEDEF:
		/* Delegate to the underlying type of the typedef. */
		return get_type_size(type->typedeft.typedefe->type);
	case TYPE_TYPEOF:
		return get_type_size(type->typeoft.typeof_type);
	case TYPE_BUILTIN_TEMPLATE:
		break;
	}
	panic("invalid type");
}
/* Lay out a run of adjacent bitfield members starting at @p first.
 * On entry, *struct_offset and *struct_alignment hold the enclosing
 * struct's current byte offset and alignment; both are updated on return.
 * @p packed suppresses storage-unit alignment (e.g. #pragma pack / packed
 * attribute).
 * Returns the first member that is not a bitfield (or NULL at the end of
 * the member list) so the caller can resume normal layout there. */
static entity_t *pack_bitfield_members(il_size_t *struct_offset, il_alignment_t *struct_alignment, bool packed, entity_t *first)
{
	il_size_t      offset     = *struct_offset;
	il_alignment_t alignment  = *struct_alignment;
	size_t         bit_offset = 0; /* bit position within the current byte */
	entity_t      *member;
	for (member = first; member != NULL; member = member->base.next) {
		if (member->kind != ENTITY_COMPOUND_MEMBER)
			continue;
		if (!member->compound_member.bitfield)
			break; /* run of bitfields ends here */

		type_t *const base_type = skip_typeref(member->declaration.type);
		il_alignment_t base_alignment = get_type_alignment_compound(base_type);
		il_alignment_t alignment_mask = base_alignment-1;
		if (base_alignment > alignment)
			alignment = base_alignment;

		size_t bit_size = member->compound_member.bit_size;
		if (!packed) {
			/* Re-express the position relative to an address aligned for
			 * the bitfield's underlying type. */
			bit_offset += (offset & alignment_mask) * BITS_PER_BYTE;
			offset     &= ~alignment_mask;
			size_t base_size = get_type_size(base_type) * BITS_PER_BYTE;

			/* Open a new storage unit when the field would straddle the
			 * current one, or on a zero-width bitfield (which forces a
			 * new unit). */
			if (bit_offset + bit_size > base_size || bit_size == 0) {
				offset += (bit_offset+BITS_PER_BYTE-1) / BITS_PER_BYTE;
				offset  = (offset + base_alignment-1) & ~alignment_mask;
				bit_offset = 0;
			}
		}

		if (byte_order_big_endian) {
			/* Big-endian targets count bit offsets from the most
			 * significant end of the storage unit. */
			size_t base_size = get_type_size(base_type) * BITS_PER_BYTE;
			member->compound_member.offset     = offset & ~alignment_mask;
			member->compound_member.bit_offset = base_size - bit_offset - bit_size;
		} else {
			member->compound_member.offset     = offset;
			member->compound_member.bit_offset = bit_offset;
		}

		/* Advance position; fold whole bytes into the byte offset. */
		bit_offset += bit_size;
		offset     += bit_offset / BITS_PER_BYTE;
		bit_offset %= BITS_PER_BYTE;
	}
	/* A partially filled trailing byte still occupies storage. */
	if (bit_offset > 0)
		offset += 1;

	*struct_offset    = offset;
	*struct_alignment = alignment;
	return member;
}
void sa_pod_stack_backend::write_to_the_array_u8( const u8* to_write_u8, u32 non_u8_index ) { u32 start_index_u8 = arr_byte_index_macro( get_type_size(), non_u8_index ); for ( u32 i=0; i<get_type_size(); ++i ) { get_the_array_u8()[start_index_u8 + i] = to_write_u8[i]; } }
/* Emit the interface table for @p classtype: a global constant array with
 * one RTTI (class-reference) entity per implemented interface.
 * Returns NULL when the class implements no interfaces. */
static ir_entity *emit_interface_table(ir_type *classtype)
{
	class_t *linked_class = (class_t*) oo_get_type_link(classtype);
	assert(linked_class);

	uint16_t n_interfaces = linked_class->n_interfaces;
	if (n_interfaces == 0)
		return NULL;

	ir_type *type_array = new_type_array(type_reference, n_interfaces);
	/* NOTE(review): the size is set explicitly here -- presumably
	 * new_type_array does not fix the array's size itself; confirm. */
	unsigned size = n_interfaces * get_type_size(type_reference);
	set_type_size(type_array, size);

	ir_initializer_t *init = create_initializer_compound(n_interfaces);
	for (uint16_t i = 0; i < n_interfaces; i++) {
		/* Resolve the constant-pool class reference to its RTTI entity. */
		uint16_t iface_ref = linked_class->interfaces[i];
		constant_classref_t *clsref = (constant_classref_t*) linked_class->constants[iface_ref];
		constant_utf8_string_t *clsname = (constant_utf8_string_t*) linked_class->constants[clsref->name_index];
		ir_type *type = class_registry_get(clsname->bytes);
		assert(type);
		ir_entity *rtti_entity = gcji_get_rtti_entity(type);
		assert(rtti_entity != NULL);
		set_compound_init_entref(init, i, rtti_entity);
	}

	ident *id = id_unique("_IF");
	ir_entity *if_ent = new_entity(get_glob_type(), id, type_array);
	set_entity_initializer(if_ent, init);
	set_entity_ld_ident(if_ent, id);

	return if_ent;
}
/* Return the size of the type behind GCC tree node @p t, or -1 when not
 * computed. Pointers deliberately keep the -1 default -- NOTE(review):
 * presumably the caller treats -1 specially for pointers; confirm.
 * Aborts (gcc_unreachable) on any other tree code. */
static int get_size( tree t )
{
	enum tree_code tc;
	tc = TREE_CODE( t );
	int size = -1;
	switch ( tc ) {
	case BOOLEAN_TYPE:
	case INTEGER_TYPE:
	case REAL_TYPE:
		size = get_type_size( t );
		break;
	case POINTER_TYPE:
		/* size intentionally stays -1 */
		break;
	default:
		fprintf( stderr, "myproof: get_size(): %s is not handled\n", tree_code_name[tc] );
		gcc_unreachable( );
	}
	return size;
}
/* Lay out a union: every member sits at offset 0; the union's size is the
 * size of the largest member rounded up to the union's alignment, and its
 * alignment is the maximum of the declared alignment and all member
 * alignments. Does nothing for incomplete unions; members with invalid
 * (error) types are skipped. */
void layout_union_type(compound_type_t *type)
{
	assert(type->compound != NULL);

	compound_t *compound = type->compound;
	if (!compound->complete)
		return;

	il_size_t      size      = 0;
	il_alignment_t alignment = compound->alignment;

	for (entity_t *entry = compound->members.entities; entry != NULL;
	     entry = entry->base.next) {
		if (entry->kind != ENTITY_COMPOUND_MEMBER)
			continue;

		type_t *m_type = skip_typeref(entry->declaration.type);
		/* Fix: m_type is already skipped; the original redundantly called
		 * skip_typeref() on it a second time. */
		if (!is_type_valid(m_type))
			continue;

		entry->compound_member.offset = 0;

		il_size_t m_size = get_type_size(m_type);
		if (m_size > size)
			size = m_size;
		il_alignment_t m_alignment = get_type_alignment_compound(m_type);
		if (m_alignment > alignment)
			alignment = m_alignment;
	}

	/* Round the size up to a multiple of the alignment (alignment is a
	 * power of two, so -alignment == ~(alignment - 1)). */
	size = (size + alignment - 1) & -alignment;

	compound->size      = size;
	compound->alignment = alignment;
}
/*
 * The 64-bit version of libgcc does not contain some builtin
 * functions for 32-bit values (__<builtin>si2) anymore.
 *
 * Widen the operand of such bit-counting builtins to pointer size and
 * rewrite the builtin's method type accordingly, so the 64-bit libgcc
 * variant (__<builtin>di2) can be used.
 */
static void widen_builtin(ir_node *node)
{
	ir_type *mtp  = get_Builtin_type(node);
	ir_type *arg1 = get_method_param_type(mtp, 0);

	// Nothing to do, if argument size is at least machine size.
	if (get_type_size(arg1) >= ir_target_pointer_size())
		return;

	// Only touch builtins with no 32-bit version.
	ir_builtin_kind kind = get_Builtin_kind(node);
	if (kind != ir_bk_clz && kind != ir_bk_ctz && kind != ir_bk_ffs && kind != ir_bk_parity && kind != ir_bk_popcount) {
		return;
	}

	ir_mode  *target_mode = get_reference_offset_mode(mode_P);
	dbg_info *dbgi        = get_irn_dbg_info(node);
	ir_node  *block       = get_nodes_block(node);
	// NOTE(review): n_Builtin_max + 1 presumably indexes the builtin's
	// first value operand (after the fixed inputs) -- confirm.
	ir_node  *op          = get_irn_n(node, n_Builtin_max + 1);

	// Insert a Conv to the widened mode in front of the operand.
	ir_node *conv = new_rd_Conv(dbgi, block, op, target_mode);
	set_irn_n(node, n_Builtin_max + 1, conv);

	// Replace the method type: one widened parameter, same result type.
	ir_type *new_arg1   = get_type_for_mode(target_mode);
	ir_type *new_result = get_method_res_type(mtp, 0);
	ir_type *new_type   = new_type_method(1, 1, false, cc_cdecl_set, mtp_no_property);
	set_method_param_type(new_type, 0, new_arg1);
	set_method_res_type(new_type, 0, new_result);
	set_Builtin_type(node, new_type);
}
/* Initialize an array of len elements at data, calling fixed() on each
 * element with the given fixed-point size and element type. */
inline void init_var_fixed(void *data, size_t len, int fixsize, TYPE type)
{
	const int elem_size = get_type_size(type);
	char *cursor = (char *)data;
	size_t i;

	for (i = 0; i < len; i++, cursor += elem_size) {
		fixed(cursor, fixsize, type);
	}
}
/* Initialize an array of len elements at data with random values in
 * [min, max], one element at a time via random(). */
inline void init_var(void *data, size_t len, double min, double max, TYPE type)
{
	const int elem_size = get_type_size(type);
	char *cursor = (char *)data;
	size_t i;

	for (i = 0; i < len; i++, cursor += elem_size) {
		random(cursor, min, max, type);
	}
}
/* Emit the field table for @p classtype: a global constant array with one
 * field descriptor per declared field.
 * Returns NULL when the class declares no fields. */
static ir_entity *emit_field_table(ir_type *classtype)
{
	class_t *linked_class = (class_t*) oo_get_type_link(classtype);
	assert(linked_class);

	uint16_t n_fields = linked_class->n_fields;
	if (n_fields == 0)
		return NULL;

	ir_type *type_array = new_type_array(type_field_desc, n_fields);
	/* NOTE(review): the size is set explicitly here -- presumably
	 * new_type_array does not fix the array's size itself; confirm. */
	unsigned size = n_fields * get_type_size(type_field_desc);
	set_type_size(type_array, size);

	ir_initializer_t *init = create_initializer_compound(n_fields);
	for (uint16_t i = 0; i < n_fields; i++) {
		/* Each field entity carries its descriptor initializer. */
		ir_entity *field = linked_class->fields[i]->link;
		ir_initializer_t *desc = get_field_desc(classtype, field);
		set_initializer_compound_value(init, i, desc);
	}

	ident *id = id_unique("_FT");
	ir_entity *ft_ent = new_entity(get_glob_type(), id, type_array);
	set_entity_initializer(ft_ent, init);
	set_entity_ld_ident(ft_ent, id);

	return ft_ent;
}
/* Initialize a dimension-index array: element i receives the value i
 * (converted to the element type via getval()). */
void init_dimvar(void *data, size_t len, TYPE type)
{
	const int elem_size = get_type_size(type);
	char *cursor = (char *)data;
	size_t i;

	for (i = 0; i < len; i++) {
		getval(cursor, i, type);
		cursor += elem_size;
	}
}
/* Return the wire alignment (as a mask: bytes-1) for the value stored in
 * @p pvar. By-reference variants marshal a 4-byte referent id, hence
 * mask 3; sizes above 4 bytes align to 8 (mask 7); a zero size needs no
 * alignment. */
static unsigned int get_type_alignment(unsigned long *pFlags, VARIANT *pvar)
{
    unsigned int size = get_type_size(pFlags, pvar);
    if(V_VT(pvar) & VT_BYREF) return 3;
    if(size == 0) return 0;
    if(size <= 4) return size - 1;
    return 7;
}
/* Map a primitive type's size to the GCC machine-mode suffix used in
 * libgcc helper names: "si" for 32-bit, "di" for 64-bit. */
static const char *get_gcc_machmode(ir_type *type)
{
	assert(is_Primitive_type(type));

	unsigned const size = get_type_size(type);
	if (size == 4)
		return "si";
	if (size == 8)
		return "di";
	panic("couldn't determine gcc machmode for type %+F", type);
}
/* Size in bytes of @p type, following alias chains down to the underlying
 * type. Returns 0 for a NULL type. */
inline size_t get_type_size(basetype_t *type)
{
	/* Iterative form of the alias walk: each alias with a recorded
	 * underlying type delegates its size to that type. */
	while (type != NULL) {
		if (is_alias(type->ohm_type) && type->elems && type->elems[0]) {
			type = type->elems[0];
			continue;
		}
		return type->size;
	}
	return 0;
}
/* Size of the object represented by a tree node: expressions and typed
 * declarations report their type's size; everything else defaults to 1. */
int getSize(c_tree tree)
{
	switch (TREE_CODE(tree)) {
	case TREE_EXPR:
		return getSizeOfExpr(tree);
	case TREE_DECL:
		return get_type_size(TREE_TYPE(tree));
	case TREE_TYPE:
		return get_type_size(tree);
	default:
		return 1;
	}
}
/* Dispatch a collected CopyB node to the appropriate lowering: small
 * copies become Load/Store sequences, large ones become memcpy calls.
 * Medium sizes were filtered out during collection, so hitting one here
 * is a bug. */
static void lower_copyb_node(ir_node *irn)
{
	unsigned const copy_size = get_type_size(get_CopyB_type(irn));

	if (copy_size <= max_small_size) {
		lower_small_copyb_node(irn);
	} else if (copy_size >= min_large_size) {
		lower_large_copyb_node(irn);
	} else {
		panic("CopyB of invalid size");
	}
}
/* Size of the value produced by an expression node. Member accesses use
 * the member's type; array references use the element type of the array
 * variable; everything else defaults to 1. */
int getSizeOfExpr(c_tree tree)
{
	int exprKind = EXPR_TYPE(tree);

	if (exprKind == MEMBER_EXPR)
		return get_type_size(TREE_TYPE(tree));

	if (exprKind == ARRAY_REF_EXPR) {
		c_tree arrayVarName = TREE_EXPR_OPERAND(tree, 0);
		return get_type_size(TREE_TYPE(arrayVarName)->type.info.a.type);
	}

	return 1;
}
// Codegen for pointer arithmetic (pointer +/- integer).
// The integer operand must be scaled by the size of the pointed-to type
// before being combined with the pointer. Falls back to plain expression
// codegen when no address is requested. Returns the register holding the
// result (or the current register counter at top level, where operands are
// evaluated for side effects only).
int basicExprWithPointerArthCodegen(c_tree tree, int *registerNo, int topLevel, BasicAsmExprPtr asmFunc, BasicAsmExprPtr immiAsmfunc, int getAddress, const char *breakLabel, const char *continueLabel)
{
	// to implement pointer arthmetic
	if (!getAddress)
		return basicExprCodegen(tree, registerNo, topLevel, asmFunc, immiAsmfunc, getAddress, breakLabel, continueLabel);

	c_tree op2 = TREE_EXPR_OPERAND(tree,1);
	c_tree op1 = TREE_EXPR_OPERAND(tree,0);

	if (!topLevel) {
		int op1Register = c_codegen_recurse(op1, registerNo, topLevel, getAddress, breakLabel, continueLabel);
		int typeSize = get_type_size(TYPE(TREE_TYPE(op1))->info.reftype);
		int resultReg;
		if (TREE_CODE(op2) == TREE_INTEGER_CST) {
			// Constant index: fold the scaling into the immediate.
			resultReg = (*registerNo)++;
			immiAsmfunc(resultReg, op1Register, typeSize * INTEGER_CST(op2)->val, "Pointer Arthmetic", NULL);
		} else {
			int op2Register = c_codegen_recurse(op2, registerNo, topLevel, getAddress, breakLabel, continueLabel);
			int mulReg = (*registerNo)++;
			resultReg = (*registerNo)++;
			// BUGFIX: scale the index (op2Register), not the pointer
			// (op1Register), and combine the pointer with the scaled
			// index (mulReg); the original multiplied the pointer by the
			// element size and then never used mulReg.
			MULi(mulReg, op2Register, typeSize, "calculating size for Pointer Arthmetic", NULL);
			asmFunc(resultReg, op1Register, mulReg, "Pointer Arthmetic", NULL);
		}
		return resultReg;
	} else {
		// evaluate the LHS and RHS....but dont do the operation
		c_codegen_recurse(op1, registerNo, topLevel, getAddress, breakLabel, continueLabel);
		c_codegen_recurse(op2, registerNo, topLevel, getAddress, breakLabel, continueLabel);
		return *registerNo;
	}
}
/* Build the IR class type used for Java arrays: a subclass of
 * java.lang.Object containing the embedded Object header followed by the
 * 'length' field. Element data starts after this header; the header size
 * and trailing alignment are recorded for element-offset computations. */
void gcji_create_array_type(void)
{
	ident *id = new_id_from_str("array");
	type_jarray = new_type_class(id);
	assert(type_java_lang_object != NULL);
	add_class_supertype(type_jarray, type_java_lang_object);
	/* embedded java.lang.Object header comes first */
	add_compound_member(type_jarray, superobject_ident, type_java_lang_object);
	ident *length_id = new_id_from_str("length");
	gcj_array_length = add_compound_member(type_jarray, length_id, type_int);
	default_layout_compound_type(type_jarray);
	array_header_size      = get_type_size(type_jarray);
	array_header_end_align = get_type_alignment(type_int);
}
// Issue an indexed draw call. When the pipeline has a registered MVP shader
// input, the current model and view-projection transforms are combined and
// uploaded first. `offset` is an element index; glDrawElements expects a
// byte offset into the bound index buffer, hence the scaling by the index
// type's size.
void GPUPipeline::draw_elements(GLenum mode, GLsizei count, long offset, GLenum type)
{
	if (_shader_mvp_input) {
		calculate_vp_transform();
		Math::Matrix4<float> mvp = _m_transform * _vp_transform;
		float trans[16];
		mvp.copy_data(trans, 16);
		bind_shader_input(trans, *_shader_mvp_input);
	}

	const long byte_offset = offset * get_type_size(type);
	glDrawElements(mode, count, type, (void*)byte_offset);
}
/* Insert the function epilog in front of a Return node: release the stack
 * frame by adjusting the stack pointer by the frame size. */
static void introduce_epilog(ir_node *ret)
{
	arch_register_t const *const sp_reg = &arm_registers[REG_SP];
	assert(arch_get_irn_register_req_in(ret, n_arm_Return_sp) == sp_reg->single_req);

	ir_node  *const sp         = get_irn_n(ret, n_arm_Return_sp);
	ir_node  *const block      = get_nodes_block(ret);
	ir_graph *const irg        = get_irn_irg(ret);
	ir_type  *const frame_type = get_irg_frame_type(irg);
	unsigned  const frame_size = get_type_size(frame_type);
	/* Negative increment: shrink the frame (stack grows downwards). */
	ir_node  *const incsp      = be_new_IncSP(sp_reg, block, sp, -frame_size, true);
	set_irn_n(ret, n_arm_Return_sp, incsp);
	sched_add_before(ret, incsp);
}
/* Codegen for an array reference a[i].
 * Computes base address + index * element size, then either loads the
 * element value or (when getAddress is set) returns its address register.
 * At top level the index is evaluated for side effects only.
 * Returns the register holding the result. */
int arrayRefCodegen(c_tree tree, int *registerNo, int topLevel, int getAddress, const char *breakLabel, const char *continueLabel)
{
	c_tree index = TREE_EXPR_OPERAND(tree,1);
	if (!topLevel) {
		c_tree array = TREE_EXPR_OPERAND(tree,0);
		/* address of the array start, then the index value */
		int startingAddReg = c_codegen_recurse(array, registerNo, FALSE, TRUE, breakLabel, continueLabel);
		int indexReg = c_codegen_recurse(index, registerNo, FALSE, FALSE, breakLabel, continueLabel);
		int size = get_type_size(TREE_TYPE(array)->type.info.a.type);
		int offsetFromStartingAddressReg = indexReg;
		if (size > 1) {
			/* scale the index by the element size */
			offsetFromStartingAddressReg = (*registerNo)++;
			MULi(offsetFromStartingAddressReg, indexReg, size, "calculate offset from start of array", NULL);
		}
		if (!getAddress) {
			int resultReg = (*registerNo)++;
			LDR(resultReg, startingAddReg, offsetFromStartingAddressReg, "load the array value into register", NULL);
			return resultReg;
		} else {
			int resultReg = (*registerNo)++;
			ADD(resultReg, startingAddReg, offsetFromStartingAddressReg, "getting address of array at index location", NULL);
			return resultReg;
		}
	} else {
		/* top level: evaluate the index for side effects only */
		c_codegen_recurse(index, registerNo, topLevel, FALSE, breakLabel, continueLabel);
		return *registerNo;
	}
}
/**
 * Post-Walker: find CopyB nodes.
 * Collects every CopyB whose type has a fixed layout and whose size is
 * either small (<= max_small_size) or large (>= min_large_size) for later
 * lowering; medium-sized CopyBs are left untouched.
 */
static void find_copyb_nodes(ir_node *irn, void *ctx)
{
	if (!is_CopyB(irn))
		return;

	ir_type *tp = get_CopyB_type(irn);
	if (get_type_state(tp) != layout_fixed)
		return;

	unsigned size = get_type_size(tp);
	/* small or large => lower later; medium => nothing to do */
	if (size <= max_small_size || size >= min_large_size) {
		walk_env_t *env = (walk_env_t*)ctx;
		ARR_APP1(ir_node*, env->copybs, irn);
	}
}
/**
 * Turn a small CopyB node into a series of Load/Store nodes.
 * Copies in the largest chunk size the alignment permits, halving the
 * chunk size for any remainder until the whole size is covered.
 */
static void lower_small_copyb_node(ir_node *irn)
{
	ir_graph *irg      = get_irn_irg(irn);
	dbg_info *dbgi     = get_irn_dbg_info(irn);
	ir_node  *block    = get_nodes_block(irn);
	ir_type  *tp       = get_CopyB_type(irn);
	ir_node  *addr_src = get_CopyB_src(irn);
	ir_node  *addr_dst = get_CopyB_dst(irn);
	ir_node  *mem      = get_CopyB_mem(irn);
	ir_mode  *mode_ref = get_irn_mode(addr_src);
	/* With misaligned access allowed, use full machine words regardless of
	 * the type's alignment. */
	unsigned  mode_bytes = allow_misalignments ? native_mode_bytes : get_type_alignment(tp);
	unsigned  size       = get_type_size(tp);
	unsigned  offset     = 0;
	bool      is_volatile = get_CopyB_volatility(irn) == volatility_is_volatile;
	ir_cons_flags flags   = is_volatile ? cons_volatile : cons_none;

	while (offset < size) {
		ir_mode *mode = get_ir_mode(mode_bytes);
		/* Emit as many chunks of the current size as fit. */
		for (; offset + mode_bytes <= size; offset += mode_bytes) {
			ir_mode *mode_ref_int = get_reference_offset_mode(mode_ref);

			/* construct offset */
			ir_node *addr_const = new_r_Const_long(irg, mode_ref_int, offset);
			ir_node *add        = new_r_Add(block, addr_src, addr_const);

			ir_node *load     = new_rd_Load(dbgi, block, mem, add, mode, tp, flags);
			ir_node *load_res = new_r_Proj(load, mode, pn_Load_res);
			ir_node *load_mem = new_r_Proj(load, mode_M, pn_Load_M);

			ir_node *addr_const2 = new_r_Const_long(irg, mode_ref_int, offset);
			ir_node *add2        = new_r_Add(block, addr_dst, addr_const2);

			ir_node *store     = new_rd_Store(dbgi, block, load_mem, add2, load_res, tp, flags);
			ir_node *store_mem = new_r_Proj(store, mode_M, pn_Store_M);

			/* chain the memory dependency through each Load/Store pair */
			mem = store_mem;
		}

		/* Remainder: retry with half the chunk size. */
		mode_bytes /= 2;
	}

	exchange(irn, mem);
}
/**
 * Completion callback for the active-message reduce.
 * On the root task, validates the received buffer against the expected
 * reduction result (element count = total bytes / element size) and
 * accumulates any failure into the global return code. Finally signals
 * completion by incrementing the caller's counter (v->cookie).
 */
void cb_amreduce_done (void *context, void * clientdata, pami_result_t err)
{
	validation_t *v = (validation_t*)clientdata;
	volatile unsigned *active = (volatile unsigned *) v->cookie;
	DEBUG((stderr, "cb_amreduce_done(): cookie= %p value=%u\n", active, *active));
	if(gVerbose) {
		check_context((pami_context_t)context);
	}
	if(my_task_id == v->root) {
		int rc_check;
		/* convert the byte count into an element count for validation */
		size_t type_sz = get_type_size(dt_array[v->dt]);
		_gRc |= rc_check = reduce_check_rcvbuf (_g_recv_buffer, v->bytes/type_sz, v->op, v->dt, my_task_id, num_tasks);
		if (rc_check) fprintf(stderr, "%s FAILED validation\n", gProtocolName);
	}
	(*active)++;
}
/**
 * Turn a large CopyB node into a memcpy call.
 * The CopyB's memory dependency is rerouted through the call's memory
 * projection, which then replaces the node.
 */
static void lower_large_copyb_node(ir_node *irn)
{
	ir_graph *const irg   = get_irn_irg(irn);
	ir_node  *const block = get_nodes_block(irn);
	dbg_info *const dbgi  = get_irn_dbg_info(irn);
	ir_node  *const mem   = get_CopyB_mem(irn);
	ir_node  *const src   = get_CopyB_src(irn);
	ir_node  *const dst   = get_CopyB_dst(irn);
	unsigned  const size  = get_type_size(get_CopyB_type(irn));

	/* Build the call memcpy(dst, src, size). */
	ir_node *const callee      = get_memcpy_address(irg);
	ir_type *const call_tp     = get_memcpy_methodtype();
	ir_mode *const mode_size_t = get_ir_mode(native_mode_bytes);
	ir_node *const size_cnst   = new_r_Const_long(irg, mode_size_t, size);
	ir_node *in[]              = { dst, src, size_cnst };
	ir_node *const call        = new_rd_Call(dbgi, block, mem, callee, ARRAY_SIZE(in), in, call_tp);
	ir_node *const call_mem    = new_r_Proj(call, mode_M, pn_Call_M);

	exchange(irn, call_mem);
}
/* Recompute cached sizes of compound types in the global types table.
 * Arrays: size = element count * element size.
 * Structs: NOTE(review): each member's size field appears to hold the
 * member's start offset at this point; differencing consecutive offsets
 * (and subtracting the last offset from the struct's total size) converts
 * them into member sizes -- confirm against the code that fills
 * types_table. */
void refresh_compound_sizes(void)
{
	int c, sz, i, nmemb;
	basetype_t *t0, *t1;

	for (c = 0; c < types_table_size; c++) {
		if (is_array(types_table[c].ohm_type)) {
			/* array size = nelem * size of element type */
			sz = get_type_size(types_table[c].elems[0]);
			types_table[c].size = types_table[c].nelem * sz;
		} else if (is_struct(types_table[c].ohm_type)) {
			nmemb = types_table[c].nelem;
			if (!nmemb)
				continue;
			/* size of member i = offset of member i+1 - offset of member i */
			for (i = 0; i < nmemb-1; ++i) {
				t0 = types_table[c].elems[i];
				t1 = types_table[c].elems[i+1];
				t0->size = t1->size - t0->size;
			}
			/* the last member extends to the end of the struct */
			t0 = types_table[c].elems[nmemb-1];
			t0->size = types_table[c].size - t0->size;
		}
	}
}
/* Evaluate the constant byte offset of an offsetof() expression by walking
 * its designator chain: member designators add the member's laid-out
 * offset, array designators add index * element size. */
static long get_offsetof_offset(const offsetof_expression_t *expression)
{
	type_t *orig_type = expression->type;
	long    offset    = 0;

	designator_t *designator = expression->designator;
	for ( ; designator != NULL; designator = designator->next) {
		type_t *type = skip_typeref(orig_type);

		if (designator->symbol != NULL) {
			/* .member designator: find the member by symbol */
			assert(is_type_compound(type));
			symbol_t *symbol = designator->symbol;

			compound_t *compound = type->compound.compound;
			entity_t   *iter     = compound->members.first_entity;
			for (; iter->base.symbol != symbol; iter = iter->base.next) {}
			assert(iter->kind == ENTITY_COMPOUND_MEMBER);

			offset += iter->compound_member.offset;
			orig_type = iter->declaration.type;
		} else {
			/* [index] designator: scale by the element size */
			expression_t *array_index = designator->array_index;
			assert(designator->array_index != NULL);
			assert(is_type_array(type));

			long index_long   = fold_expression_to_int(array_index);
			type_t *element_type = type->array.element_type;
			long element_size = get_type_size(element_type);

			/* TODO: check for overflow */
			offset += index_long * element_size;
			orig_type = type->array.element_type;
		}
	}

	return offset;
}
/* Compute the wire size of a VARIANT for NDR marshalling, starting at
 * buffer offset @p Start. Accounts for the fixed wire header, the 4-byte
 * referent id of by-ref variants, alignment padding, the value itself (or
 * another referent id for VT_VARIANT|VT_BYREF), and any trailing data
 * (e.g. BSTR / interface payload) via wire_extra_user_size(). */
unsigned long WINAPI VARIANT_UserSize(unsigned long *pFlags, unsigned long Start, VARIANT *pvar)
{
    int align;
    TRACE("(%lx,%ld,%p)\n", *pFlags, Start, pvar);
    TRACE("vt=%04x\n", V_VT(pvar));

    /* the wire header is 8-byte aligned */
    ALIGN_LENGTH(Start, 7);
    Start += sizeof(variant_wire_t);
    if(V_VT(pvar) & VT_BYREF)
        Start += 4; /* referent id */

    align = get_type_alignment(pFlags, pvar);
    ALIGN_LENGTH(Start, align);
    if(V_VT(pvar) == (VT_VARIANT | VT_BYREF))
        Start += 4; /* nested variant is marshalled as a referent id */
    else
        Start += get_type_size(pFlags, pvar);
    /* trailing variable-length data (BSTR, interfaces, ...) */
    Start = wire_extra_user_size(pFlags, Start, pvar);

    TRACE("returning %ld\n", Start);
    return Start;
}