Example #1
/**
 * Check whether a Return can be moved one block upwards.
 *
 * In a block with a Return, all live nodes must be linked
 * with the Return, otherwise they are dead (because the Return leaves
 * the graph, so no more users of the other nodes can exist).
 *
 * We can move a Return if its predecessors are Phi nodes or
 * come from another block. In the latter case, it is always possible
 * to move the Return one block up, because the predecessor block must
 * dominate the Return block (SSA) and then it dominates the predecessor
 * blocks of the Return block as well.
 *
 * All predecessors of the Return block must be Jmps, of course, or we
 * cannot move it up, so we insert blocks where needed.
 */
static bool can_move_ret(ir_node *ret)
{
    ir_node *retbl = get_nodes_block(ret);
    int i, n = get_irn_arity(ret);

    for (i = 0; i < n; ++i) {
        ir_node *pred = get_irn_n(ret, i);

        if (! is_Phi(pred) && retbl == get_nodes_block(pred)) {
            /* first condition failed: found a non-Phi predecessor
             * that is in the Return block */
            return false;
        }
    }

    /* check that the predecessors are Jmps */
    n = get_Block_n_cfgpreds(retbl);
    /* we cannot move above a labeled block, as this might kill the block */
    if (n <= 1 || get_Block_entity(retbl) != NULL)
        return false;
    for (i = 0; i < n; ++i) {
        ir_node *pred = get_Block_cfgpred(retbl, i);

        pred = skip_Tuple(pred);
        if (! is_Jmp(pred) && !is_Bad(pred)) {
            /* simply place a new block here */
            ir_graph *irg  = get_irn_irg(retbl);
            ir_node *block = new_r_Block(irg, 1, &pred);
            ir_node *jmp   = new_r_Jmp(block);
            set_Block_cfgpred(retbl, i, jmp);
        }
    }
    return true;
}
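A hedged usage sketch: in libFirm, every Return node is a control-flow predecessor of the graph's end block, so a pass could locate candidates roughly as below (scan_returns and the merging step are illustrative, not part of the original code):

static void scan_returns(ir_graph *irg)
{
    ir_node *end_block = get_irg_end_block(irg);
    for (int i = 0, n = get_Block_n_cfgpreds(end_block); i < n; ++i) {
        ir_node *ret = get_Block_cfgpred(end_block, i);
        if (is_Return(ret) && can_move_ret(ret)) {
            /* candidate: merge the Return into its predecessor blocks */
        }
    }
}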
Example #2
/**
 * Check if we can reach a target node from a given node inside one basic block.
 * @param h    The heights object.
 * @param curr The current node from which we try to reach the target.
 * @param tgt  The node we try to reach.
 * @return     true if tgt can be reached from curr, false otherwise.
 */
static bool search(ir_heights_t *h, const ir_node *curr, const ir_node *tgt)
{
	/* if the current node is the one we were looking for, we're done. */
	if (curr == tgt)
		return true;

	/* If we are in another block or at a phi we won't find our target. */
	if (get_nodes_block(curr) != get_nodes_block(tgt))
		return false;
	if (is_Phi(curr))
		return false;

	/* Check, if we have already been here. Coming more often won't help :-) */
	irn_height_t *h_curr = maybe_get_height_data(h, curr);
	if (h_curr->visited >= h->visited)
		return false;

	/* If we are too deep into the DAG we won't find the target either. */
	irn_height_t *h_tgt = maybe_get_height_data(h, tgt);
	if (h_curr->height > h_tgt->height)
		return false;

	/* Mark this place as visited. */
	h_curr->visited = h->visited;

	/* Start a search from this node. */
	foreach_irn_in(curr, i, op) {
		if (search(h, op, tgt))
			return true;
	}

	return false;
}
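The h_curr->visited >= h->visited test above is epoch-based visited marking: instead of clearing per-node flags before every query, the query counter h->visited is incremented and any node carrying an older stamp counts as unvisited. A minimal generic sketch of the idiom (all names here are illustrative):

#include <stdbool.h>

typedef struct { unsigned stamp; } mark_t;

static unsigned epoch;

/* Returns true only the first time a node is entered in the current query. */
static bool enter_once(mark_t *m)
{
	if (m->stamp >= epoch)
		return false;  /* already visited in this query */
	m->stamp = epoch;
	return true;
}

/* Starting a new query is then just: ++epoch; */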
Example #3
/**
 * Pre-Walker called by compute_callgraph(), analyses all Call nodes.
 */
static void ana_Call(ir_node *n, void *env)
{
	(void)env;
	if (!is_Call(n))
		return;

	ir_graph *irg = get_irn_irg(n);
	for (size_t i = 0, n_callees = cg_get_call_n_callees(n); i < n_callees;
	     ++i) {
		ir_entity *callee_e = cg_get_call_callee(n, i);
		ir_graph  *callee   = get_entity_linktime_irg(callee_e);

		if (callee) {
			cg_callee_entry buf;
			buf.irg = callee;
			pset_insert((pset *)callee->callers, irg, hash_ptr(irg));
			cg_callee_entry *found = (cg_callee_entry*) pset_find((pset *)irg->callees, &buf, hash_ptr(callee));
			if (found) {  /* add Call node to list, compute new nesting. */
				ir_node **arr = found->call_list;
				ARR_APP1(ir_node *, arr, n);
				found->call_list = arr;
			} else { /* New node, add Call node and init nesting. */
				found = OALLOC(get_irg_obstack(irg), cg_callee_entry);
				found->irg = callee;
				found->call_list = NEW_ARR_F(ir_node *, 1);
				found->call_list[0] = n;
				found->max_depth = 0;
				pset_insert((pset *)irg->callees, found, hash_ptr(callee));
			}
			unsigned depth = get_loop_depth(get_irn_loop(get_nodes_block(n)));
			found->max_depth = MAX(found->max_depth, depth);
		}
	}
}
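A hedged sketch of how such a pre-walker is typically driven: run it over every graph in the program. The real compute_callgraph() also initializes the caller/callee sets beforehand, which is omitted here; foreach_irp_irg and irg_walk_graph are assumed from libFirm's public API:

static void build_callgraph_sketch(void)
{
	foreach_irp_irg(i, irg) {
		irg_walk_graph(irg, ana_Call, NULL, NULL);
	}
}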
Example #4
void dmemory_lower_Alloc(ir_node *node)
{
	assert(is_Alloc(node));

	if (get_Alloc_where(node) != heap_alloc)
		return;

	ir_graph *irg     = get_irn_irg(node);
	ir_type  *type    = get_Alloc_type(node);
	ir_node  *count   = get_Alloc_count(node);
	ir_node  *res     = NULL;
	ir_node  *cur_mem = get_Alloc_mem(node);
	ir_node  *block   = get_nodes_block(node);

	if (is_Class_type(type)) {
		res = (*dmemory_model.alloc_object)(type, irg, block, &cur_mem);
		ddispatch_prepare_new_instance(type, res, irg, block, &cur_mem);
	} else if (is_Array_type(type)) {
		ir_type *eltype  = get_array_element_type(type);
		res = (*dmemory_model.alloc_array)(eltype, count, irg, block, &cur_mem);
	} else {
		assert (0);
	}

	turn_into_tuple(node, pn_Alloc_max);
	set_irn_n(node, pn_Alloc_M, cur_mem);
	set_irn_n(node, pn_Alloc_res, res);
}
Example #5
static void replace_with_call(ir_node *node)
{
	widen_builtin(node);

	ir_type        *const mtp      = get_Builtin_type(node);
	ir_builtin_kind const kind     = get_Builtin_kind(node);
	char     const *const name     = get_builtin_name(kind);
	ir_type        *const arg1     = get_method_param_type(mtp, 0);
	char     const *const machmode = get_gcc_machmode(arg1);
	ident          *const id       = new_id_fmt("__%s%s2", name, machmode);
	ir_entity      *const entity
		= create_compilerlib_entity(get_id_str(id), mtp);

	dbg_info *const dbgi      = get_irn_dbg_info(node);
	ir_node  *const block     = get_nodes_block(node);
	ir_node  *const mem       = get_Builtin_mem(node);
	ir_graph *const irg       = get_irn_irg(node);
	ir_node  *const callee    = new_r_Address(irg, entity);
	int       const n_params  = get_Builtin_n_params(node);
	ir_node **const params    = get_Builtin_param_arr(node);
	ir_node  *const call      = new_rd_Call(dbgi, block, mem, callee, n_params, params, mtp);
	ir_node  *const call_mem  = new_r_Proj(call, mode_M, pn_Call_M);
	ir_node  *const call_ress = new_r_Proj(call, mode_T, pn_Call_T_result);
	ir_type  *const res_type  = get_method_res_type(mtp, 0);
	ir_mode  *const res_mode  = get_type_mode(res_type);
	ir_node  *const call_res  = new_r_Proj(call_ress, res_mode, 0);

	ir_node *const in[] = {
		[pn_Builtin_M]       = call_mem,
		[pn_Builtin_max + 1] = call_res,
	};
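For orientation, the ident built by new_id_fmt() above follows the libgcc naming scheme: name "popcount" combined with machmode "si" yields __popcountsi2, the 32-bit popcount helper, while "di" yields the 64-bit __popcountdi2.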
Example #6
	static void walk_topo_helper(ir_node* irn, const std::function<void (ir_node*, void*)>& walker, void* env)
	{
		if (irn_visited(irn))
			return;

		/* only break loops at phi/block nodes */
		const bool is_loop_breaker = is_Phi(irn) || is_Block(irn);

		if (is_loop_breaker)
			mark_irn_visited(irn);

		if (!is_Block(irn))
		{
			ir_node* block = get_nodes_block(irn);

			if (block != NULL)
				walk_topo_helper(block, walker, env);
		}

		for (int i = 0; i < get_irn_arity(irn); ++i)
		{
			ir_node* pred = get_irn_n(irn, i);
			walk_topo_helper(pred, walker, env);
		}

		if (is_loop_breaker || !irn_visited(irn))
			walker(irn, env);

		mark_irn_visited(irn);
	}
Example #7
/**
 * lower Alloca nodes to allocate "bytes" instead of a certain type
 */
static void lower_alloca_free(ir_node *node, void *data)
{
	(void) data;
	if (is_Alloc(node)) {
	} else if (is_Proj(node)) {
		ir_node *proj_pred = get_Proj_pred(node);
		if (is_Alloc(proj_pred)) {
			transform_Proj_Alloc(node);
		}
		return;
	} else {
		return;
	}
	if (!ir_nodeset_insert(&transformed, node))
		return;

	if (stack_alignment <= 1)
		return;

	ir_node  *const size     = get_Alloc_size(node);
	ir_node  *const mem      = get_Alloc_mem(node);
	ir_node  *const block    = get_nodes_block(node);
	dbg_info *const dbgi     = get_irn_dbg_info(node);
	ir_node  *const new_size = adjust_alloc_size(dbgi, size, block);
	ir_node  *const new_node
		= new_rd_Alloc(dbgi, block, mem, new_size, 1);
	ir_nodeset_insert(&transformed, new_node);

	if (new_node != node)
		exchange(node, new_node);
}
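adjust_alloc_size() is not shown here; for a power-of-two stack_alignment, the rounding it would have to perform is the classic bit trick below (a sketch under that assumption, not the original helper):

/* Round size up to the next multiple of align; align must be a power of two. */
static unsigned long round_up(unsigned long size, unsigned long align)
{
	return (size + align - 1) & ~(align - 1);
}

For example, round_up(13, 8) == 16 and round_up(16, 8) == 16.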
Example #8
/*
 * The 64-bit version of libgcc does not contain some builtin
 * functions for 32-bit values (__<builtin>si2) anymore.
 */
static void widen_builtin(ir_node *node)
{
	ir_type *mtp  = get_Builtin_type(node);
	ir_type *arg1 = get_method_param_type(mtp, 0);

	// Nothing to do if the argument size is at least the machine word size.
	if (get_type_size(arg1) >= ir_target_pointer_size())
		return;

	// Only touch builtins with no 32-bit version.
	ir_builtin_kind kind = get_Builtin_kind(node);
	if (kind != ir_bk_clz    &&
	    kind != ir_bk_ctz    &&
	    kind != ir_bk_ffs    &&
	    kind != ir_bk_parity &&
	    kind != ir_bk_popcount) {
		return;
	}

	ir_mode  *target_mode = get_reference_offset_mode(mode_P);
	dbg_info *dbgi        = get_irn_dbg_info(node);
	ir_node  *block       = get_nodes_block(node);
	ir_node  *op          = get_irn_n(node, n_Builtin_max + 1);

	ir_node *conv = new_rd_Conv(dbgi, block, op, target_mode);
	set_irn_n(node, n_Builtin_max + 1, conv);

	ir_type *new_arg1   = get_type_for_mode(target_mode);
	ir_type *new_result = get_method_res_type(mtp, 0);
	ir_type *new_type   = new_type_method(1, 1, false, cc_cdecl_set, mtp_no_property);
	set_method_param_type(new_type, 0, new_arg1);
	set_method_res_type(new_type, 0, new_result);
	set_Builtin_type(node, new_type);
}
Example #9
ir_node *be_transform_phi(ir_node *node, const arch_register_req_t *req)
{
	ir_node  *block = be_transform_node(get_nodes_block(node));
	ir_graph *irg   = get_Block_irg(block);
	dbg_info *dbgi  = get_irn_dbg_info(node);

	/* phi nodes allow loops, so we use the old arguments for now
	 * and fix this later */
	ir_node **ins   = get_irn_in(node)+1;
	int       arity = get_irn_arity(node);
	ir_mode  *mode  = req->cls != NULL ? req->cls->mode : get_irn_mode(node);
	ir_node  *phi   = new_ir_node(dbgi, irg, block, op_Phi, mode, arity, ins);
	copy_node_attr(irg, node, phi);
	be_duplicate_deps(node, phi);

	backend_info_t *info = be_get_info(phi);
	struct obstack *obst = be_get_be_obst(irg);
	info->in_reqs = OALLOCN(obst, const arch_register_req_t*, arity);
	for (int i = 0; i < arity; ++i) {
		info->in_reqs[i] = req;
	}

	arch_set_irn_register_req_out(phi, 0, req);
	be_enqueue_preds(node);

	return phi;
}
Example #10
ir_node *be_duplicate_node(ir_node *node)
{
	ir_node  *block = be_transform_node(get_nodes_block(node));
	ir_graph *irg   = env.irg;
	dbg_info *dbgi  = get_irn_dbg_info(node);
	ir_mode  *mode  = get_irn_mode(node);
	ir_op    *op    = get_irn_op(node);

	ir_node *new_node;
	int      arity = get_irn_arity(node);
	if (op->opar == oparity_dynamic) {
		new_node = new_ir_node(dbgi, irg, block, op, mode, -1, NULL);
		for (int i = 0; i < arity; ++i) {
			ir_node *in = get_irn_n(node, i);
			in = be_transform_node(in);
			add_irn_n(new_node, in);
		}
	} else {
		ir_node **ins = ALLOCAN(ir_node*, arity);
		for (int i = 0; i < arity; ++i) {
			ir_node *in = get_irn_n(node, i);
			ins[i] = be_transform_node(in);
		}

		new_node = new_ir_node(dbgi, irg, block, op, mode, arity, ins);
	}

	copy_node_attr(irg, node, new_node);
	be_duplicate_deps(node, new_node);

	new_node->node_nr = node->node_nr;
	return new_node;
}
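be_duplicate_node() is the generic fallback of a backend transformation: nodes without a target-specific transformer are copied, with their operands transformed. A hedged sketch of the dispatch idiom it slots into (the table and transform_func type are illustrative; libFirm's actual driver differs in detail):

typedef ir_node *(*transform_func)(ir_node *node);

static ir_node *transform_dispatch(ir_node *node, transform_func *table)
{
	transform_func f = table[get_irn_opcode(node)];
	return f != NULL ? f(node) : be_duplicate_node(node);
}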
Example #11
/**
 * Get the predecessor block.
 */
static ir_node *get_block_n(const ir_node *block, int pos)
{
	ir_node *cfgpred = get_cf_op(get_Block_cfgpred(block, pos));
	if (is_Bad(cfgpred))
		return NULL;
	return get_nodes_block(cfgpred);
}
Example #12
int heights_reachable_in_block(ir_heights_t *h, const ir_node *n,
                               const ir_node *m)
{
	int           res = 0;
	irn_height_t *hn  = maybe_get_height_data(h, n);
	irn_height_t *hm  = maybe_get_height_data(h, m);

	assert(get_nodes_block(n) == get_nodes_block(m));
	assert(hn != NULL && hm != NULL);

	if (hn->height <= hm->height) {
		h->visited++;
		res = search(h, n, m);
	}

	return res;
}
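A hedged usage sketch: a scheduler can combine the two query directions to test whether two nodes of one block are independent, i.e. neither computes an operand of the other (independent_in_block is illustrative, not part of the heights API):

static bool independent_in_block(ir_heights_t *h, const ir_node *a,
                                 const ir_node *b)
{
	return !heights_reachable_in_block(h, a, b)
	    && !heights_reachable_in_block(h, b, a);
}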
Example #13
/**
 * Check if a given value is last used in (i.e. dies in) the block of
 * some other node.
 */
static bool value_last_used_here(be_lv_t *lv, ir_node *here, ir_node *value)
{
	ir_node *block = get_nodes_block(here);

	/* If the value is live at the block end, it certainly does not die here */
	if (be_is_live_end(lv, block, value))
		return false;

	/* if multiple nodes in this block use the value, then we cannot decide
	 * whether the value will die here (because there is no schedule yet).
	 * Assume it does not die in this case. */
	foreach_out_edge(value, edge) {
		ir_node *user = get_edge_src_irn(edge);
		if (user != here && get_nodes_block(user) == block) {
			return false;
		}
	}

	return true;
}
Example #14
static ir_node *convert_to_modeb(ir_node *node)
{
	ir_node  *block = get_nodes_block(node);
	ir_graph *irg   = get_irn_irg(node);
	ir_node  *zero  = new_r_Const_null(irg, lowered_mode);
	ir_node  *cmp   = new_r_Cmp(block, node, zero, ir_relation_less_greater);
	return cmp;
}
Example #15
ir_node *be_new_Copy_before_reg(ir_node *const val, ir_node *const before, arch_register_t const *const reg)
{
	ir_node *const block = get_nodes_block(before);
	ir_node *const copy  = be_new_Copy(block, val);
	sched_add_before(before, copy);
	arch_set_irn_register_out(copy, 0, reg);
	return copy;
}
Example #16
static ir_node *create_fpu_mode_reload(void *const env, ir_node *const state, ir_node *const spill, ir_node *const before, ir_node *const last_state)
{
	(void)env;
	(void)state;

	ir_node        *reload;
	ir_node  *const block = get_nodes_block(before);
	ir_graph *const irg   = get_irn_irg(block);
	ir_node  *const noreg = ia32_new_NoReg_gp(irg);
	ir_node  *const nomem = get_irg_no_mem(irg);
	if (ia32_cg_config.use_unsafe_floatconv) {
		reload = new_bd_ia32_FldCW(NULL, block, noreg, noreg, nomem);
		ir_entity *const rounding_mode = spill ?
			create_ent(&fpcw_round,    0xC7F, "_fpcw_round") :
			create_ent(&fpcw_truncate, 0x37F, "_fpcw_truncate");
		set_ia32_am_ent(reload, rounding_mode);
	} else {
		ir_node       *mem;
		ir_node *const frame = get_irg_frame(irg);
		if (spill) {
			mem = spill;
		} else {
			assert(last_state);
			ir_node *const cwstore = create_fnstcw(block, frame, noreg, nomem, last_state);
			sched_add_before(before, cwstore);

			ir_node *const load = new_bd_ia32_Load(NULL, block, frame, noreg, cwstore);
			set_ia32_op_type(load, ia32_AddrModeS);
			set_ia32_ls_mode(load, mode_Hu);
			set_ia32_frame_use(load, IA32_FRAME_USE_32BIT);
			sched_add_before(before, load);

			ir_node *const load_res = new_r_Proj(load, ia32_mode_gp, pn_ia32_Load_res);

			/* TODO: Make the actual mode configurable in ChangeCW. */
			ir_node *const or_const = ia32_create_Immediate(irg, 0xC00);
			ir_node *const orn      = new_bd_ia32_Or(NULL, block, noreg, noreg, nomem, load_res, or_const);
			sched_add_before(before, orn);

			ir_node *const store = new_bd_ia32_Store(NULL, block, frame, noreg, nomem, orn);
			set_ia32_op_type(store, ia32_AddrModeD);
			/* Use ia32_mode_gp, as movl has a shorter opcode than movw. */
			set_ia32_ls_mode(store, ia32_mode_gp);
			set_ia32_frame_use(store, IA32_FRAME_USE_32BIT);
			sched_add_before(before, store);
			mem = new_r_Proj(store, mode_M, pn_ia32_Store_M);
		}

		reload = new_bd_ia32_FldCW(NULL, block, frame, noreg, mem);
	}

	set_ia32_op_type(reload, ia32_AddrModeS);
	set_ia32_ls_mode(reload, ia32_mode_fpcw);
	set_ia32_frame_use(reload, IA32_FRAME_USE_32BIT);
	arch_set_irn_register(reload, &ia32_registers[REG_FPCW]);
	sched_add_before(before, reload);
	return reload;
}
Example #17
static ir_node *create_not(dbg_info *dbgi, ir_node *node)
{
	ir_node  *block = get_nodes_block(node);
	ir_mode  *mode  = lowered_mode;
	ir_graph *irg   = get_irn_irg(node);
	ir_node  *one   = new_rd_Const_one(dbgi, irg, mode);

	return new_rd_Eor(dbgi, block, node, one);
}
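The Eor with the constant one implements logical negation because lowered boolean values are exactly 0 or 1: 0 ^ 1 == 1 and 1 ^ 1 == 0.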
Example #18
/**
 * implementation of create_set_func which produces a cond with control
 * flow
 */
static ir_node *create_cond_set(ir_node *cond_value, ir_mode *dest_mode)
{
	ir_node *lower_block = part_block_edges(cond_value);
	ir_node *upper_block = get_nodes_block(cond_value);
	foreach_out_edge_safe(upper_block, edge) {
		/* The cached nodes might belong to the lower block, so we have
		 * to clear the cache for moved nodes to avoid dominance problems. */
		ir_node *node = get_edge_src_irn(edge);
		set_irn_link(node, NULL);
	}
Example #19
/** patches Addresses to work in position-independent code */
static void fix_pic_addresses(ir_node *const node, void *const data)
{
	(void)data;

	ir_graph      *const irg = get_irn_irg(node);
	be_main_env_t *const be  = be_get_irg_main_env(irg);
	foreach_irn_in(node, i, pred) {
		if (!is_Address(pred))
			continue;

		ir_node         *res;
		ir_entity *const entity = get_Address_entity(pred);
		dbg_info  *const dbgi   = get_irn_dbg_info(pred);
		if (i == n_Call_ptr && is_Call(node)) {
			/* Calls can jump to relative addresses, so we can directly jump to
			 * the (relatively) known call address or the trampoline */
			if (can_address_relative(entity))
				continue;

			ir_entity *const trampoline = get_trampoline(be, entity);
			res = new_rd_Address(dbgi, irg, trampoline);
		} else if (get_entity_type(entity) == get_code_type()) {
			/* Block labels can always be addressed directly. */
			continue;
		} else {
			/* Everything else is accessed relative to EIP. */
			ir_node *const block    = get_nodes_block(pred);
			ir_mode *const mode     = get_irn_mode(pred);
			ir_node *const pic_base = ia32_get_pic_base(irg);

			if (can_address_relative(entity)) {
				/* All ok now for locally constructed stuff. */
				res = new_rd_Add(dbgi, block, pic_base, pred, mode);
				/* Make sure the walker doesn't visit this add again. */
				mark_irn_visited(res);
			} else {
				/* Get entry from pic symbol segment. */
				ir_entity *const pic_symbol  = get_pic_symbol(be, entity);
				ir_node   *const pic_address = new_rd_Address(dbgi, irg, pic_symbol);
				ir_node   *const add         = new_rd_Add(dbgi, block, pic_base, pic_address, mode);
				mark_irn_visited(add);

				/* We need an extra indirection for global data outside our current
				 * module. The loads are always safe and can therefore float and
				 * need no memory input */
				ir_type *const type  = get_entity_type(entity);
				ir_node *const nomem = get_irg_no_mem(irg);
				ir_node *const load  = new_rd_Load(dbgi, block, nomem, add, mode, type, cons_floats);
				res = new_r_Proj(load, mode, pn_Load_res);
			}
		}
		set_irn_n(node, i, res);
	}
}
Example #20
void dmemory_lower_Arraylength(ir_node *arraylength)
{
	ir_node  *array_ref = get_Arraylength_arrayref(arraylength);
	ir_node  *block     = get_nodes_block(arraylength);
	ir_graph *irg       = get_irn_irg(block);
	ir_node  *cur_mem   = get_Arraylength_mem(arraylength);
	ir_node  *len       = (*dmemory_model.get_arraylength)(array_ref, irg, block, &cur_mem);

	turn_into_tuple(arraylength, pn_Arraylength_max);
	set_irn_n(arraylength, pn_Arraylength_M, cur_mem);
	set_irn_n(arraylength, pn_Arraylength_res, len);
}
Example #21
/**
 * Tests whether we can legally move node 'node' after node 'after'
 * (only works for nodes in the same block).
 */
static bool can_move(ir_node *node, ir_node *after)
{
	ir_node *node_block = get_nodes_block(node);
	assert(node_block == get_nodes_block(after));

	/* all users have to be after the 'after' node */
	foreach_out_edge(node, edge) {
		ir_node *out = get_edge_src_irn(edge);
		if (get_nodes_block(out) != node_block)
			continue;
		/* phi represents a usage at block end */
		if (is_Phi(out))
			continue;
		if (arch_is_irn_not_scheduled(out)) {
			if (!can_move(out, after))
				return false;
		} else {
			if (sched_get_time_step(out) <= sched_get_time_step(after))
				return false;
		}
	}

	return true;
}
Example #22
/**
 * lower 64-bit minus operation
 */
static void ia32_lower_minus64(ir_node *node, ir_mode *mode)
{
	dbg_info *dbg     = get_irn_dbg_info(node);
	ir_node  *block   = get_nodes_block(node);
	ir_node  *op      = get_Minus_op(node);
	ir_node  *op_low  = get_lowered_low(op);
	ir_node  *op_high = get_lowered_high(op);
	ir_node  *minus   = new_bd_ia32_l_Minus64(dbg, block, op_low, op_high);
	ir_node  *l_res   = new_r_Proj(minus, ia32_mode_gp, pn_ia32_Minus64_res_low);
	ir_node  *h_res   = new_r_Proj(minus, mode, pn_ia32_Minus64_res_high);
	ir_set_dw_lowered(node, l_res, h_res);
}
Example #23
/**
 * lower 64-bit addition: a 32-bit add for the lower parts, an add with
 * carry for the higher parts. If the carry's value is known, fold it
 * into the upper add.
 */
static void ia32_lower_add64(ir_node *node, ir_mode *mode)
{
	dbg_info     *dbg        = get_irn_dbg_info(node);
	ir_node      *block      = get_nodes_block(node);
	ir_node      *left       = get_Add_left(node);
	ir_node      *right      = get_Add_right(node);
	ir_node      *left_low   = get_lowered_low(left);
	ir_node      *left_high  = get_lowered_high(left);
	ir_node      *right_low  = get_lowered_low(right);
	ir_node      *right_high = get_lowered_high(right);
	ir_mode      *low_mode   = get_irn_mode(left_low);
	ir_mode      *high_mode  = get_irn_mode(left_high);
	carry_result  cr         = lower_add_carry(left, right, low_mode);

	assert(get_irn_mode(left_low)  == get_irn_mode(right_low));
	assert(get_irn_mode(left_high) == get_irn_mode(right_high));

	if (cr == no_carry) {
		ir_node *add_low  = new_rd_Add(dbg, block, left_low,  right_low, low_mode);
		ir_node *add_high = new_rd_Add(dbg, block, left_high, right_high, high_mode);
		ir_set_dw_lowered(node, add_low, add_high);
	} else if (cr == must_carry && (is_Const(left_high) || is_Const(right_high))) {
		// We cannot assume that left_high and right_high form a normalized Add.
		ir_node *constant;
		ir_node *other;

		if (is_Const(left_high)) {
			constant = left_high;
			other    = right_high;
		} else {
			constant = right_high;
			other    = left_high;
		}

		ir_graph *irg            = get_irn_irg(right_high);
		ir_node  *one            = new_rd_Const(dbg, irg, get_mode_one(high_mode));
		ir_node  *const_plus_one = new_rd_Add(dbg, block, constant, one, high_mode);
		ir_node  *add_high       = new_rd_Add(dbg, block, other, const_plus_one, high_mode);
		ir_node  *add_low        = new_rd_Add(dbg, block, left_low, right_low, low_mode);
		ir_set_dw_lowered(node, add_low, add_high);
	} else {
		/* l_res = a_l + b_l */
		ir_node  *add_low    = new_bd_ia32_l_Add(dbg, block, left_low, right_low);
		ir_mode  *mode_flags = ia32_reg_classes[CLASS_ia32_flags].mode;
		ir_node  *res_low    = new_r_Proj(add_low, ia32_mode_gp, pn_ia32_l_Add_res);
		ir_node  *flags      = new_r_Proj(add_low, mode_flags, pn_ia32_l_Add_flags);

		/* h_res = a_h + b_h + carry */
		ir_node  *add_high
			= new_bd_ia32_l_Adc(dbg, block, left_high, right_high, flags, mode);
		ir_set_dw_lowered(node, res_low, add_high);
	}
}
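A worked instance of the must_carry fold: suppose the low words are 0xFFFFFFFF and 0x00000001, so the low add always carries, and the high words are 5 and the constant 1. The full result's high word is 5 + 1 + carry = 7; the code computes it as other + (constant + 1) = 5 + 2 = 7 with a plain add, avoiding the add-with-carry.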
Example #24
/**
 * This function returns the last definition of a value.  In case
 * this value was last defined in a previous block, Phi nodes are
 * inserted.  If the part of the firm graph containing the definition
 * is not yet constructed, a dummy Phi node is returned.
 *
 * @param block   the current block
 * @param pos     the value number of the value searched
 * @param mode    the mode of this value (needed for Phi construction)
 */
static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
{
	ir_node *res = block->attr.block.graph_arr[pos];
	if (res != NULL)
		return res;

	/* in a matured block we can immediately determine the phi arguments */
	if (get_Block_matured(block)) {
		ir_graph *const irg   = get_irn_irg(block);
		int       const arity = get_irn_arity(block);
		/* no predecessors: use unknown value */
		if (arity == 0) {
			if (block == get_irg_start_block(irg)) {
				if (default_initialize_local_variable != NULL) {
					ir_node *rem = get_r_cur_block(irg);
					set_r_cur_block(irg, block);
					res = default_initialize_local_variable(irg, mode, pos - 1);
					set_r_cur_block(irg, rem);
				} else {
					res = new_r_Unknown(irg, mode);
				}
			} else {
				goto bad; /* unreachable block, use Bad */
			}
		/* one predecessor: just use its value */
		} else if (arity == 1) {
			ir_node *cfgpred = get_Block_cfgpred(block, 0);
			if (is_Bad(cfgpred)) {
bad:
				res = new_r_Bad(irg, mode);
			} else {
				ir_node *cfgpred_block = get_nodes_block(cfgpred);
				res = get_r_value_internal(cfgpred_block, pos, mode);
			}
		} else {
			/* multiple predecessors construct Phi */
			res = new_rd_Phi0(NULL, block, mode, pos);
			/* enter phi0 into our variable value table to break cycles
			 * arising from set_phi_arguments */
			block->attr.block.graph_arr[pos] = res;
			res = set_phi_arguments(res, pos);
		}
	} else {
		/* in case of an immature block we have to keep a Phi0 */
		res = new_rd_Phi0(NULL, block, mode, pos);
		/* enqueue phi so we can set arguments once the block matures */
		res->attr.phi.next     = block->attr.block.phis;
		block->attr.block.phis = res;
	}
	block->attr.block.graph_arr[pos] = res;
	return res;
}
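A hedged sketch of the public interface this function backs: front ends number their local variables and access them through set_r_value()/get_r_value() (or the current-graph variants set_value()/get_value()); every read funnels into get_r_value_internal() above, which materializes Phi nodes on demand:

/* Copy variable #2 into variable #3, reading it as a 32-bit signed int. */
static void copy_variable(ir_graph *irg)
{
	ir_node *val = get_r_value(irg, 2, mode_Is);
	set_r_value(irg, 3, val);
}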
Example #25
/**
 * Transforms a Conv into the appropriate soft float function.
 */
static bool lower_Conv(ir_node *const n)
{
	dbg_info *const dbgi    = get_irn_dbg_info(n);
	ir_node  *const block   = get_nodes_block(n);
	ir_mode  *const mode    = get_irn_mode(n);
	ir_node        *op      = get_Conv_op(n);
	ir_mode        *op_mode = get_irn_mode(op);

	char const *name;
	if (!mode_is_float(mode)) {
		if (!mode_is_float(op_mode))
			return false;
		if (mode_is_signed(mode))
			name = "fix";
		else
			name = "fixuns";
	} else if (!mode_is_float(op_mode)) {
		ir_mode *min_mode;
		if (mode_is_signed(op_mode)) {
			name     = "float";
			min_mode = mode_Is;
		} else {
			name     = "floatun";
			min_mode = mode_Iu;
		}
		if (get_mode_size_bits(op_mode) < get_mode_size_bits(min_mode)) {
			op_mode = min_mode;
			op      = new_rd_Conv(dbgi, block, op, op_mode);
		}
	} else {
		/* Remove unnecessary Convs. */
		if (op_mode == mode) {
			exchange(n, op);
			return true;
		}
		if (get_mode_size_bits(op_mode) > get_mode_size_bits(mode))
			name = "trunc";
		else
			name = "extend";
	}

	ir_node *const in[]   = { op };
	ir_node       *result = make_softfloat_call(n, name, ARRAY_SIZE(in), in);

	/* Check whether we need a Conv for the result. */
	if (get_irn_mode(result) != mode)
		result = new_rd_Conv(dbgi, block, result, mode);

	exchange(n, result);
	return true;
}
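For example, a Conv from a 32-bit float to a signed integer selects the name "fix" and, with the mode suffixes that create_softfloat_address() presumably appends, ends up calling a libgcc helper such as __fixsfsi; the unsigned counterpart "fixuns" corresponds to __fixunssfsi.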
Example #26
/**
 * lower 64-bit subtraction: a 32-bit sub for the lower parts, a sub
 * with borrow for the higher parts. If the borrow's value is known,
 * fold it into the upper sub.
 */
static void ia32_lower_sub64(ir_node *node, ir_mode *mode)
{
	dbg_info     *dbg        = get_irn_dbg_info(node);
	ir_node      *block      = get_nodes_block(node);
	ir_node      *left       = get_Sub_left(node);
	ir_node      *right      = get_Sub_right(node);
	ir_node      *left_low   = get_lowered_low(left);
	ir_node      *left_high  = get_lowered_high(left);
	ir_node      *right_low  = get_lowered_low(right);
	ir_node      *right_high = get_lowered_high(right);
	ir_mode      *low_mode   = get_irn_mode(left_low);
	ir_mode      *high_mode  = get_irn_mode(left_high);
	carry_result  cr         = lower_sub_borrow(left, right, low_mode);

	assert(get_irn_mode(left_low)  == get_irn_mode(right_low));
	assert(get_irn_mode(left_high) == get_irn_mode(right_high));

	if (cr == no_carry) {
		ir_node *sub_low  = new_rd_Sub(dbg, block, left_low,  right_low, low_mode);
		ir_node *sub_high = new_rd_Sub(dbg, block, left_high, right_high, high_mode);
		ir_set_dw_lowered(node, sub_low, sub_high);
	} else if (cr == must_carry && (is_Const(left_high) || is_Const(right_high))) {
		ir_node  *sub_high;
		ir_graph *irg        = get_irn_irg(right_high);
		ir_node  *one        = new_rd_Const(dbg, irg, get_mode_one(high_mode));

		if (is_Const(right_high)) {
			ir_node *new_const = new_rd_Add(dbg, block, right_high, one, high_mode);
			sub_high = new_rd_Sub(dbg, block, left_high, new_const, high_mode);
		} else if (is_Const(left_high)) {
			ir_node *new_const = new_rd_Sub(dbg, block, left_high, one, high_mode);
			sub_high = new_rd_Sub(dbg, block, new_const, right_high, high_mode);
		} else {
			panic("logic error");
		}

		ir_node  *sub_low  = new_rd_Sub(dbg, block, left_low, right_low, low_mode);
		ir_set_dw_lowered(node, sub_low, sub_high);
	} else {
		/* l_res = a_l - b_l */
		ir_node  *sub_low    = new_bd_ia32_l_Sub(dbg, block, left_low, right_low);
		ir_mode  *mode_flags = ia32_reg_classes[CLASS_ia32_flags].mode;
		ir_node  *res_low    = new_r_Proj(sub_low, ia32_mode_gp, pn_ia32_l_Sub_res);
		ir_node  *flags      = new_r_Proj(sub_low, mode_flags, pn_ia32_l_Sub_flags);

		/* h_res = a_h - b_h - carry */
		ir_node  *sub_high
			= new_bd_ia32_l_Sbb(dbg, block, left_high, right_high, flags, mode);
		ir_set_dw_lowered(node, res_low, sub_high);
	}
}
Example #27
static void introduce_epilog(ir_node *ret)
{
	arch_register_t const *const sp_reg = &arm_registers[REG_SP];
	assert(arch_get_irn_register_req_in(ret, n_arm_Return_sp) == sp_reg->single_req);

	ir_node  *const sp         = get_irn_n(ret, n_arm_Return_sp);
	ir_node  *const block      = get_nodes_block(ret);
	ir_graph *const irg        = get_irn_irg(ret);
	ir_type  *const frame_type = get_irg_frame_type(irg);
	unsigned  const frame_size = get_type_size_bytes(frame_type);
	ir_node  *const incsp      = be_new_IncSP(sp_reg, block, sp, -frame_size, 0);
	set_irn_n(ret, n_arm_Return_sp, incsp);
	sched_add_before(ret, incsp);
}
Example #28
/**
 * Emit a Jmp.
 */
static void emit_amd64_Jmp(const ir_node *node)
{
	ir_node *block, *next_block;

	/* for now, the code works for scheduled and non-scheduled blocks */
	block = get_nodes_block(node);

	/* we have a block schedule */
	next_block = sched_next_block(block);
	if (get_cfop_target_block(node) != next_block) {
		amd64_emitf(node, "jmp %L");
	} else if (be_options.verbose_asm) {
		amd64_emitf(node, "/* fallthrough to %L */");
	}
}
Example #29
static ir_node *make_softfloat_call(ir_node *const n, char const *const name,
                                    size_t const arity,
                                    ir_node *const *const in)
{
	dbg_info *const dbgi     = get_irn_dbg_info(n);
	ir_node  *const block    = get_nodes_block(n);
	ir_graph *const irg      = get_irn_irg(n);
	ir_node  *const nomem    = get_irg_no_mem(irg);
	ir_node  *const callee   = create_softfloat_address(n, name);
	ir_type  *const type     = get_softfloat_type(n);
	ir_mode  *const res_mode = get_type_mode(get_method_res_type(type, 0));
	ir_node  *const call     = new_rd_Call(dbgi, block, nomem, callee, arity,
	                                       in, type);
	ir_node  *const results  = new_r_Proj(call, mode_T, pn_Call_T_result);
	ir_node  *const result   = new_r_Proj(results, res_mode, 0);
	return result;
}
Example #30
File: eh.c Project: MatzeB/liboo
void eh_lower_Raise(ir_node *raise, ir_node *proj)
{
	assert (is_Raise(raise) && is_Proj(proj));

	ir_node  *ex_obj  = get_Raise_exo_ptr(raise);
	ir_node  *block   = get_nodes_block(raise);
	ir_graph *irg     = get_irn_irg(raise);
	ir_node  *cur_mem = get_Raise_mem(raise);

	ir_node  *c_symc  = new_r_SymConst(irg, throw_entity);
	ir_node  *in[1]   = { ex_obj };

	ir_node  *throw   = new_r_Call(block, cur_mem, c_symc, 1, in, get_entity_type(throw_entity));
	ir_set_throws_exception(throw, 1);
	exchange(raise, throw);
	set_Proj_num(proj, pn_Call_X_except);
}