Example #1
const out_val *gen_expr_assign(const expr *e, out_ctx *octx)
{
	const out_val *val, *store;

	UCC_ASSERT(!e->assign_is_post, "assign_is_post set for non-compound assign");

	assert(!type_is_s_or_u(e->tree_type));

	val = gen_expr(e->rhs, octx);
	store = gen_expr(e->lhs, octx);
	out_val_retain(octx, store); /* out_store() consumes a reference; keep one for the re-read below */

	out_store(octx, store, val);

	/* re-read from the store,
	 * e.g. if the value has undergone bitfield truncation */
	return out_deref(octx, store);
}
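The re-read matters because the value of an assignment expression is the value of the lvalue after the store, which can differ from the rhs. A minimal illustration in plain C, independent of ucc's internals, using a bitfield:

#include <stdio.h>

struct S { unsigned x : 3; };

int demo(void)
{
	struct S s;
	/* 9 does not fit in 3 bits: the store truncates it to 9 & 7 == 1,
	 * and the assignment expression yields the truncated value read
	 * back from the field, not the rhs */
	return (s.x = 9); /* returns 1 */
}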
Example #2
const out_val *gen_expr_assign_compound(const expr *e, out_ctx *octx)
{
	/* int += float
	 * lea int, cast up to float, add, cast down to int, store
	 */
	const out_val *saved_post = NULL, *addr_lhs, *rhs, *lhs, *result;

	addr_lhs = gen_expr(e->lhs, octx);

	out_val_retain(octx, addr_lhs); /* 2 */

	if(e->assign_is_post){
		out_val_retain(octx, addr_lhs); /* 3 */
		saved_post = out_deref(octx, addr_lhs); /* addr_lhs=2, saved_post=1 */
	}

	/* delay the dereference until after generating rhs.
	 * this is fine, += etc aren't sequence points
	 */

	rhs = gen_expr(e->rhs, octx);

	/* here's the delayed dereference */
	lhs = out_deref(octx, addr_lhs); /* addr_lhs=1 */
	if(e->bits.compoundop.upcast_ty)
		lhs = out_cast(octx, lhs, e->bits.compoundop.upcast_ty, /*normalise_bool:*/1);

	result = out_op(octx, e->bits.compoundop.op, lhs, rhs);
	gen_op_trapv(e->tree_type, &result, octx, e->bits.compoundop.op);

	if(e->bits.compoundop.upcast_ty) /* need to cast back down to store */
		result = out_cast(octx, result, e->tree_type, /*normalise_bool:*/1);

	if(!saved_post)
		out_val_retain(octx, result);
	out_store(octx, addr_lhs, result);

	if(!saved_post)
		return result;
	return saved_post;
}
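The source-level behaviour being implemented: the lhs is converted up to the common type, the operation happens there, and the result is converted back down for the store. Judging by the assign_is_post flag, the post-increment/decrement forms appear to come through this path too, yielding the value saved before the store. A sketch in plain C:

void demo(void)
{
	int i = 3;
	float f = 1.5f;

	i += f;      /* upcast_ty is float: 3.0f + 1.5f == 4.5f, cast back down: i == 4 */

	int j = i++; /* post form: the old value (saved_post) is the result, so j == 4, i == 5 */
	(void)j;
}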
Example #3
void gen_expr_assign_compound(expr *e)
{
	/* int += float
	 * lea int, cast up to float, add, cast down to int, store
	 */
	lea_expr(e->bits.compound_upcast ? expr_cast_child(e->lhs) : e->lhs);

	if(e->assign_is_post){
		out_dup();
		out_deref();
		out_flush_volatile();
		out_swap();
		out_comment("saved for compound op");
	}

	out_dup();
	/* delay the dereference until after generating rhs.
	 * this is fine, += etc aren't sequence points
	 */

	gen_expr(e->rhs);

	/* here's the delayed dereference */
	out_swap();
	out_deref();
	if(e->bits.compound_upcast)
		out_cast(e->lhs->tree_type, /*normalise_bool:*/1);
	out_swap();

	out_op(e->op);

	if(e->bits.compound_upcast) /* need to cast back down to store */
		out_cast(e->tree_type, /*normalise_bool:*/1);

	out_store();

	if(e->assign_is_post)
		out_pop();
}
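For orientation, here is one plausible reading of the value stack through the post-assign path of this older, stack-based variant (it manipulates an implicit value stack via out_dup/out_swap/out_pop rather than threading an out_ctx; the exact out_* stack semantics are inferred from context, so treat this as a sketch, for `a += b`):

	lea_expr(lhs)                        &a
	out_dup(); out_deref(); out_swap()   a_old  &a              (post only)
	out_dup()                            a_old  &a  &a
	gen_expr(rhs)                        a_old  &a  &a  b
	out_swap(); out_deref()              a_old  &a  b   a
	out_swap()                           a_old  &a  a   b
	out_op(op)                           a_old  &a  a+b
	out_store()                          a_old  a+b             (stores a+b through &a)
	out_pop()                            a_old                  (the post-assign result)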
Example #4
static void vla_cache_size(
		type *const qual_t, out_ctx *octx,
		type *const arith_ty,
		const out_val *sz,
		const out_val *stack_ent)
{
	type *ptrsizety = type_ptr_to(arith_ty);
	dynmap **pvlamap, *vlamap;
	struct cc1_out_ctx *cc1_octx;

	/* keep the caller's retain */
	out_val_retain(octx, stack_ent);

	stack_ent = out_change_type(octx, stack_ent, ptrsizety);
	out_val_retain(octx, stack_ent); /* retain for the vlamap */
	out_store(octx, stack_ent, sz);

	cc1_octx = cc1_out_ctx_or_new(octx);

	vlamap = *(pvlamap = &cc1_octx->vlamap);
	if(!vlamap){
		/* type * => out_val const* */
		vlamap = *pvlamap = dynmap_new(type *, NULL, type_hash);
	}

	/* the excerpt cuts off here; assumed completion: record the cached
	 * slot against the qualified VLA type for later lookups */
	dynmap_set(type *, const out_val *, vlamap, qual_t, stack_ent);
}
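The reason the size is written out and remembered at all: a VLA's size is fixed when its declaration is reached, and a later `sizeof` on it must report that captured size regardless of what the controlling expression does afterwards. In plain C:

#include <stddef.h>

size_t demo(int n)
{
	int vla[n];    /* the size (n * sizeof(int)) is computed and stored here */

	n = 0;         /* mutating n afterwards changes nothing below */

	/* sizeof on a VLA is evaluated at run time, from the size cached
	 * at the declaration -- the stack slot vla_cache_size() fills in */
	return sizeof vla;
}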
Example #5
static void impl_overlay_mem_reg(
		out_ctx *octx,
		unsigned memsz, unsigned nregs,
		struct vreg regs[], int mem2reg,
		const out_val *ptr)
{
	const unsigned pws = platform_word_size();
	struct vreg *cur_reg = regs;
	unsigned reg_i = 0;

	if(memsz == 0){
		out_val_release(octx, ptr);
		return;
	}

	UCC_ASSERT(
			nregs * pws >= memsz,
			"not enough registers for memory overlay");

	out_comment(octx,
			"overlay, %s2%s(%u)",
			mem2reg ? "mem" : "reg",
			mem2reg ? "reg" : "mem",
			memsz);

	if(!mem2reg){
		/* reserve all registers so we don't accidentally wipe before the spill */
		for(reg_i = 0; reg_i < nregs; reg_i++)
			v_reserve_reg(octx, &regs[reg_i]);
	}

	for(;; cur_reg++, reg_i++){
		/* read/write whatever size is required */
		type *this_ty;
		unsigned this_sz;

		if(cur_reg->is_float){
			UCC_ASSERT(memsz >= 4, "float for memsz %u?", memsz);

			this_ty = type_nav_btype(
					cc1_type_nav,
					memsz > 4 ? type_double : type_float);

		}else{
			this_ty = type_nav_MAX_FOR(cc1_type_nav, memsz);
		}
		this_sz = type_size(this_ty, NULL);

		UCC_ASSERT(this_sz <= memsz, "reading/writing too much memory");

		ptr = out_change_type(octx, ptr, type_ptr_to(this_ty));

		out_val_retain(octx, ptr);

		if(mem2reg){
			const out_val *fetched;

			/* can use impl_deref, as we have a register already,
			 * and know that the memory is an lvalue and not a bitfield
			 *
			 * this means we can load straight into the desired register
			 */
			fetched = impl_deref(octx, ptr, cur_reg);

			UCC_ASSERT(reg_i < nregs, "reg oob");

			if(fetched->type != V_REG || !vreg_eq(&fetched->bits.regoff.reg, cur_reg)){
				/* move to register */
				v_freeup_reg(octx, cur_reg);
				fetched = v_to_reg_given(octx, fetched, cur_reg);
			}
			out_flush_volatile(octx, fetched);
			v_reserve_reg(octx, cur_reg); /* prevent changes */

		}else{
			const out_val *vreg = v_new_reg(octx, NULL, this_ty, cur_reg);

			out_store(octx, ptr, vreg);
		}

		memsz -= this_sz;

		/* early termination */
		if(memsz == 0)
			break;

		/* increment our memory pointer */
		ptr = out_change_type(
				octx,
				ptr,
				type_ptr_to(type_nav_btype(cc1_type_nav, type_uchar)));

		ptr = out_op(octx, op_plus,
				ptr,
				out_new_l(
					octx,
					type_nav_btype(cc1_type_nav, type_intptr_t),
					pws));
	}

	out_val_release(octx, ptr);

	/* done, unreserve all registers */
	for(reg_i = 0; reg_i < nregs; reg_i++)
		v_unreserve_reg(octx, &regs[reg_i]);
}
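As a concrete picture of what this routine does (assuming an x86-64 SysV target, which is an assumption on our part, not something stated in the excerpt): a 12-byte struct returned by value travels in two integer registers, so its memory is overlaid as one 8-byte access followed by one 4-byte access, with the pointer stepped by the word size in between:

struct P { int x, y, z; };  /* 12 bytes */

/* returning p by value overlays its 12 bytes onto two registers:
 * iteration 1 reads 8 bytes at p+0 (memsz 12 -> 4), the pointer
 * advances by pws == 8, iteration 2 reads 4 bytes at p+8 (memsz 0) */
struct P make(void)
{
	struct P p = { 1, 2, 3 };
	return p;
}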