/* Emit the address (lea) of the struct member accessed by `e`:
 * evaluate the lhs pointer, add the member's byte offset, then
 * retype the result as a pointer to the member's own type. */
static void gen_expr_struct_lea(expr *e)
{
	decl *mem;

	ASSERT_NOT_DOT();

	/* push the base pointer */
	gen_expr(e->lhs);

	/* retype to void* so the offset addition is plain byte arithmetic */
	out_change_type(type_ptr_to(type_nav_btype(cc1_type_nav, type_void)));

	/* push the member's integral offset and add it on */
	out_push_l(type_nav_btype(cc1_type_nav, type_intptr_t), struct_offset(e));
	out_op(op_plus);

	mem = e->bits.struct_mem.d;

	if(fopt_mode & FOPT_VERBOSE_ASM)
		out_comment("struct member %s", mem->spel);

	/* result is a pointer to the member's type */
	out_change_type(type_ptr_to(mem->ref));

	/* for a bitfield member, record its bit offset and width so a
	 * later out_deref()/out_store() (i.e. read + write) can apply
	 * the appropriate masking */
	if(mem->bits.var.field_width){
		const unsigned width = const_fold_val_i(mem->bits.var.field_width);

		out_set_bitfield(mem->bits.var.struct_offset_bitfield, width);
		out_comment("struct bitfield lea");
	}
}
/* Emit a sanitizer ordering check: compare `test` (retyped to
 * `op_type`) against the constant `limit` using `op`, and assert
 * on the result with description `desc`.
 * `test` is retained first, so the caller's reference survives. */
static void sanitize_assert_order(
		const out_val *test, enum op_type op, long limit,
		type *op_type, out_ctx *octx, const char *desc)
{
	const out_val *bound;
	const out_val *widened;

	/* the constant we compare against, in the comparison type */
	bound = out_new_l(octx, op_type, limit);

	/* retain: caller keeps its own reference to `test` */
	widened = out_change_type(octx, out_val_retain(octx, test), op_type);

	sanitize_assert(out_op(octx, op, widened, bound), octx, desc);
}
/* Store a VLA's computed size `sz` into the stack slot `stack_ent`
 * and (below) ensure the per-context vla map exists so the slot can
 * be recorded for later lookup.
 * NOTE(review): purpose inferred from the visible calls; `qual_t` is
 * not used in this visible portion — confirm against the rest of the
 * function, which continues beyond this chunk of the file. */
static void vla_cache_size(
		type *const qual_t, out_ctx *octx,
		type *const arith_ty,
		const out_val *sz, const out_val *stack_ent)
{
	type *ptrsizety = type_ptr_to(arith_ty);
	dynmap **pvlamap, *vlamap;
	struct cc1_out_ctx *cc1_octx;

	/* keep the caller's retain */
	out_val_retain(octx, stack_ent);
	stack_ent = out_change_type(octx, stack_ent, ptrsizety);

	out_val_retain(octx, stack_ent); /* retain for the vlamap */

	/* write the size through the (retyped) stack pointer */
	out_store(octx, stack_ent, sz);

	cc1_octx = cc1_out_ctx_or_new(octx);
	vlamap = *(pvlamap = &cc1_octx->vlamap);
	if(!vlamap){
		/* lazily create the map: type * => out_val const* */
		vlamap = *pvlamap = dynmap_new(type *, NULL, type_hash);
	}
	/* NOTE(review): function body continues past this point in the
	 * original file — remainder not visible here */
/* Overlay a block of memory with a set of registers, copying either
 * memory -> registers (mem2reg != 0) or registers -> memory.
 *
 * `ptr` points at the memory (released before returning); `memsz` is
 * the byte count; `regs[0..nregs)` are the registers to fill/spill,
 * walked in order, chunked by the widest type that fits the remaining
 * size (double/float for float regs, integer max-type otherwise).
 *
 * Fix: the register-address arguments in the reserve and unreserve
 * loops had been mis-encoded ("&reg" mojibake'd into a registered-sign
 * character); restored to `&regs[reg_i]`. */
static void impl_overlay_mem_reg(
		out_ctx *octx,
		unsigned memsz, unsigned nregs,
		struct vreg regs[], int mem2reg,
		const out_val *ptr)
{
	const unsigned pws = platform_word_size();
	struct vreg *cur_reg = regs;
	unsigned reg_i = 0;

	if(memsz == 0){
		out_val_release(octx, ptr);
		return;
	}

	UCC_ASSERT(
			nregs * pws >= memsz,
			"not enough registers for memory overlay");

	out_comment(octx,
			"overlay, %s2%s(%u)",
			mem2reg ? "mem" : "reg",
			mem2reg ? "reg" : "mem",
			memsz);

	if(!mem2reg){
		/* reserve all registers so we don't accidentally wipe before the spill */
		for(reg_i = 0; reg_i < nregs; reg_i++)
			v_reserve_reg(octx, &regs[reg_i]);
	}

	for(;; cur_reg++, reg_i++){
		/* read/write whatever size is required */
		type *this_ty;
		unsigned this_sz;

		if(cur_reg->is_float){
			UCC_ASSERT(memsz >= 4, "float for memsz %u?", memsz);

			this_ty = type_nav_btype(
					cc1_type_nav,
					memsz > 4 ? type_double : type_float);
		}else{
			this_ty = type_nav_MAX_FOR(cc1_type_nav, memsz);
		}
		this_sz = type_size(this_ty, NULL);

		UCC_ASSERT(this_sz <= memsz, "reading/writing too much memory");

		ptr = out_change_type(octx, ptr, type_ptr_to(this_ty));
		out_val_retain(octx, ptr);

		if(mem2reg){
			const out_val *fetched;

			/* can use impl_deref, as we have a register already,
			 * and know that the memory is an lvalue and not a bitfield
			 *
			 * this means we can load straight into the desired register */
			fetched = impl_deref(octx, ptr, cur_reg);

			UCC_ASSERT(reg_i < nregs, "reg oob");

			if(fetched->type != V_REG
			|| !vreg_eq(&fetched->bits.regoff.reg, cur_reg))
			{
				/* move to register */
				v_freeup_reg(octx, cur_reg);
				fetched = v_to_reg_given(octx, fetched, cur_reg);
			}
			out_flush_volatile(octx, fetched);
			v_reserve_reg(octx, cur_reg); /* prevent changes */
		}else{
			const out_val *vreg = v_new_reg(octx, NULL, this_ty, cur_reg);
			out_store(octx, ptr, vreg);
		}

		memsz -= this_sz;

		/* early termination */
		if(memsz == 0)
			break;

		/* increment our memory pointer */
		ptr = out_change_type(
				octx, ptr,
				type_ptr_to(type_nav_btype(cc1_type_nav, type_uchar)));

		ptr = out_op(octx, op_plus, ptr,
				out_new_l(octx,
					type_nav_btype(cc1_type_nav, type_intptr_t),
					pws));
	}

	out_val_release(octx, ptr);

	/* done, unreserve all registers */
	for(reg_i = 0; reg_i < nregs; reg_i++)
		v_unreserve_reg(octx, &regs[reg_i]);
}