Code example #1
/**
 * Try to place a Shl into an address mode.
 *
 * @param addr    the address mode data so far
 * @param node    the node to place
 * @return true on success
 */
static bool eat_shl(x86_address_t *addr, ir_node *node)
{
	/* we can only eat a shl if we don't have a scale or index set yet */
	if (addr->scale != 0 || addr->index != NULL)
		return false;

	ir_node *shifted_val;
	long     val;
	if (is_Shl(node)) {
		/* we can use shl with 0,1,2 or 3 shift */
		ir_node *right = get_Shl_right(node);
		if (!is_Const(right))
			return false;
		ir_tarval *tv = get_Const_tarval(right);
		if (!tarval_is_long(tv))
			return false;

		val = get_tarval_long(tv);
		if (val < 0 || val > 3)
			return false;
		if (val == 0)
			be_warningf(node, "found unoptimized Shl x,0");

		shifted_val = get_Shl_left(node);
	} else if (is_Add(node)) {
		/* might be an add x, x */
		ir_node *left  = get_Add_left(node);
		ir_node *right = get_Add_right(node);
		if (left != right)
			return false;
		if (is_Const(left))
			return false;

		val         = 1;
		shifted_val = left;
	} else {
		return false;
	}

	if (x86_is_non_address_mode_node(node))
		return false;

	addr->scale = val;
	addr->index = shifted_val;
	return true;
}
Code example #2
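/**
 * Try to fold the immediate operands reachable from a node (Add of
 * immediates, Address, Const, Relocation, Unknown) into the immediate
 * part of the address mode.
 *
 * @param addr  the address mode data so far
 * @param node  the node to fold
 * @return true on success
 */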
static bool eat_imm(x86_address_t *const addr, ir_node const *const node)
{
	switch (get_irn_opcode(node)) {
	case iro_Add:
		/* Add is supported as long as both operands are immediates. */
		return
			!x86_is_non_address_mode_node(node) &&
			eat_imm(addr, get_Add_left(node)) &&
			eat_imm(addr, get_Add_right(node));

	case iro_Address:
		/* The first Address of a DAG can be folded into an immediate. */
		if (addr->imm.entity)
			return false;
		addr->imm.entity = get_Address_entity(node);
		addr->imm.kind = X86_IMM_ADDR;
		if (is_tls_entity(addr->imm.entity))
			addr->tls_segment = true;
		return true;

	case iro_Const: {
		/* Add the value to the offset. */
		ir_tarval *const tv = get_Const_tarval(node);
		if (!tarval_possible(tv))
			return false;
		addr->imm.offset += get_tarval_long(tv);
		return true;
	}

	case iro_Unknown:
		/* Use '0' for Unknowns. */
		return true;

	default:
		if (be_is_Relocation(node)) {
			if (addr->imm.entity)
				return false;
			addr->imm.entity = be_get_Relocation_entity(node);
			addr->imm.kind = (x86_immediate_kind_t)be_get_Relocation_kind(node);
			return true;
		}
		/* All other nodes are not immediates. */
		return false;
	}
}
Code example #3
File: proc_cloning.c  Project: mj3-16/libfirm
/**
 * Process a call node.
 *
 * @param call    An ir_node to be checked.
 * @param callee  The entity of the callee
 * @param hmap    The quadruple-set containing the calls with constant parameters
 */
static void process_call(ir_node *call, ir_entity *callee, q_set *hmap)
{
	/* TODO
	 * Beware: We cannot clone variadic parameters as well as the
	 * last non-variadic one, which might be needed for the va_start()
	 * magic. */

	/* In this loop we collect the calls that have a constant parameter. */
	size_t const n_params = get_Call_n_params(call);
	for (size_t i = n_params; i-- > 0;) {
		ir_node *const call_param = get_Call_param(call, i);
		if (is_Const(call_param)) {
			/* We have found a Call to collect, so save the information we need. */
			if (!hmap->map)
				hmap->map = new_pset(entry_cmp, 8);

			entry_t *const key = OALLOC(&hmap->obst, entry_t);
			key->q.ent   = callee;
			key->q.pos   = i;
			key->q.tv    = get_Const_tarval(call_param);
			key->q.calls = NULL;
			key->weight  = 0.0F;
			key->next    = NULL;

			/* Insert entry or get existing equivalent entry */
			entry_t *const entry = (entry_t*)pset_insert(hmap->map, key, hash_entry(key));
			/* Free memory if entry already is in set */
			if (entry != key)
				obstack_free(&hmap->obst, key);

			/* add the call to the list */
			if (!entry->q.calls) {
				entry->q.calls = NEW_ARR_F(ir_node*, 1);
				entry->q.calls[0] = call;
			} else {
				ARR_APP1(ir_node *, entry->q.calls, call);
			}
		}
	}
}
Code example #4
File: Worklist.cpp  Project: djrieger/mjplusplus
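	// Seed the worklist: walk the graph topologically, tag each node's link
	// with an initial tarval (the node's value for numeric Consts, tarval_bad
	// for non-evaluable nodes, tarval_unknown otherwise) and queue it, Phis first.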
	Worklist::Worklist(ir_graph* functionGraph, GraphHandler& handler): functionGraph(functionGraph), handler(handler)
	{
		typedef void (*ir_func)(ir_node*, void*);

		struct envMembers
		{
			std::queue<ir_node*>* pQueue;
			std::unordered_map<ir_node*, bool>* pIsQueued;
		};

		envMembers envInstance;
		envInstance.pQueue = &this->worklist;
		envInstance.pIsQueued = &this->isQueued;

		ir_func addPhis = [](ir_node * node, void* env)
		{
			if (is_Phi(node))
			{
				auto envInstance = (envMembers*)env;
				set_irn_link(node, (void*)tarval_unknown);
				envInstance->pQueue->push(node);
				(*envInstance->pIsQueued)[node] = true;
			}
		};

		ir_func addToWorklist = [](ir_node * node, void* env)
		{
			if (is_Phi(node))
				return;

			auto envInstance = (envMembers*)env;

			ir_tarval* tarval;

			auto isBadNode = [&] (Node node) -> bool
			{
				if (is_Call(node) || is_Load(node) || is_Start(node))
					return true;

				/*else if (Node(node).getChildCount() > 0)
				{
					for (Node child : Node(node).getChildren())
						if (child.getTarval() == tarval_bad)
							return true;
				}*/

				return false;
			};

			// TODO: Support other modes such as Bu, Lu
			if (is_Const(node) && Node(node).getTarval().isNumeric())
				tarval = get_Const_tarval(node);
			else if (isBadNode(Node(node)))
				tarval = tarval_bad;
			else
				tarval = tarval_unknown;

			set_irn_link(node, (void*)tarval);

			envInstance->pQueue->push(node);
			(*envInstance->pIsQueued)[node] = true;
		};

		walk_topological(functionGraph, addPhis, (void*)&envInstance);
		walk_topological(functionGraph, addToWorklist, (void*)&envInstance);
	}
Code example #5
File: entity.c  Project: qznc/libfirm
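/**
 * Copy the constant expression @p n (recursively) into @p block.
 *
 * @return the copied expression
 */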
ir_node *copy_const_value(dbg_info *dbg, ir_node *n, ir_node *block)
{
	ir_graph *irg = get_irn_irg(block);

	/* @@@ GL I think we should implement this using the routines from irgopt
	 * for dead node elimination/inlining. */
	ir_mode *m = get_irn_mode(n);
	ir_node *nn;
	switch (get_irn_opcode(n)) {
	case iro_Const:
		nn = new_rd_Const(dbg, irg, get_Const_tarval(n));
		break;
	case iro_SymConst:
		nn = new_rd_SymConst(dbg, irg, get_irn_mode(n), get_SymConst_symbol(n), get_SymConst_kind(n));
		break;
	case iro_Add:
		nn = new_rd_Add(dbg, block,
		                copy_const_value(dbg, get_Add_left(n), block),
		                copy_const_value(dbg, get_Add_right(n), block), m);
		break;
	case iro_Sub:
		nn = new_rd_Sub(dbg, block,
		                copy_const_value(dbg, get_Sub_left(n), block),
		                copy_const_value(dbg, get_Sub_right(n), block), m);
		break;
	case iro_Mul:
		nn = new_rd_Mul(dbg, block,
		                copy_const_value(dbg, get_Mul_left(n), block),
		                copy_const_value(dbg, get_Mul_right(n), block), m);
		break;
	case iro_And:
		nn = new_rd_And(dbg, block,
		                copy_const_value(dbg, get_And_left(n), block),
		                copy_const_value(dbg, get_And_right(n), block), m);
		break;
	case iro_Or:
		nn = new_rd_Or(dbg, block,
		               copy_const_value(dbg, get_Or_left(n), block),
		               copy_const_value(dbg, get_Or_right(n), block), m);
		break;
	case iro_Eor:
		nn = new_rd_Eor(dbg, block,
		                copy_const_value(dbg, get_Eor_left(n), block),
		                copy_const_value(dbg, get_Eor_right(n), block), m);
		break;
	case iro_Conv:
		nn = new_rd_Conv(dbg, block,
		                 copy_const_value(dbg, get_Conv_op(n), block), m);
		break;
	case iro_Minus:
		nn = new_rd_Minus(dbg, block,
		                  copy_const_value(dbg, get_Minus_op(n), block), m);
		break;
	case iro_Not:
		nn = new_rd_Not(dbg, block,
		                copy_const_value(dbg, get_Not_op(n), block), m);
		break;
	case iro_Unknown:
		nn = new_r_Unknown(irg, m);
		break;
	default:
		panic("opcode invalid or not implemented %+F", n);
	}
	return nn;
}
Code example #6
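/**
 * Create an address mode for the given address computation: fold immediates
 * and, where possible, select base, index and scale components.
 *
 * @param addr   the address mode data to fill in
 * @param node   the node to match
 * @param flags  flags controlling how aggressively nodes may be folded
 */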
void x86_create_address_mode(x86_address_t *addr, ir_node *node,
                             x86_create_am_flags_t flags)
{
	addr->imm.kind = X86_IMM_VALUE;
	if (eat_immediate(addr, node, true)) {
		addr->variant = addr->ip_base ? X86_ADDR_RIP : X86_ADDR_JUST_IMM;
		return;
	}

	assert(!addr->ip_base);
	if (!(flags & x86_create_am_force) && x86_is_non_address_mode_node(node)
	    && (!(flags & x86_create_am_double_use) || get_irn_n_edges(node) > 2)) {
		addr->variant = X86_ADDR_BASE;
		addr->base    = node;
		return;
	}

	ir_node *eat_imms = eat_immediates(addr, node, flags, false);
	if (eat_imms != node) {
		if (flags & x86_create_am_force)
			eat_imms = be_skip_downconv(eat_imms, true);

		node = eat_imms;
		if (x86_is_non_address_mode_node(node)) {
			addr->variant = X86_ADDR_BASE;
			addr->base    = node;
			return;
		}
	}

	/* starting point Add, Sub or Shl, FrameAddr */
	if (is_Shl(node)) {
		/* We don't want to eat an Add x, x as a Shl here, so only accept real
		 * Shl instructions: the former should become Lea x, x, not Shl x, 1 */
		if (eat_shl(addr, node)) {
			addr->variant = X86_ADDR_INDEX;
			return;
		}
	} else if (eat_immediate(addr, node, true)) {
		/* we can hit this case in x86_create_am_force mode */
		addr->variant = addr->ip_base ? X86_ADDR_RIP : X86_ADDR_JUST_IMM;
		return;
	} else if (is_Add(node)) {
		ir_node *left  = get_Add_left(node);
		ir_node *right = get_Add_right(node);

		if (flags & x86_create_am_force) {
			left  = be_skip_downconv(left, true);
			right = be_skip_downconv(right, true);
		}
		left  = eat_immediates(addr, left, flags, false);
		right = eat_immediates(addr, right, flags, false);

		if (eat_shl(addr, left)) {
			left = NULL;
		} else if (eat_shl(addr, right)) {
			right = NULL;
		}

		/* (x & 0xFFFFFFFC) + (x >> 2) -> lea(x >> 2, x >> 2, 4) */
		if (left != NULL && right != NULL) {
			ir_node *and;
			ir_node *shr;
			if (is_And(left) && (is_Shr(right) || is_Shrs(right))) {
				and = left;
				shr = right;
				goto tryit;
			}
			if (is_And(right) && (is_Shr(left) || is_Shrs(left))) {
				and = right;
				shr = left;
tryit:
				if (get_And_left(and) == get_binop_left(shr)) {
					ir_node *and_right = get_And_right(and);
					ir_node *shr_right = get_binop_right(shr);

					if (is_Const(and_right) && is_Const(shr_right)) {
						ir_tarval *and_mask     = get_Const_tarval(and_right);
						ir_tarval *shift_amount = get_Const_tarval(shr_right);
						ir_mode   *mode         = get_irn_mode(and);
						ir_tarval *all_one      = get_mode_all_one(mode);
						ir_tarval *shift_mask   = tarval_shl(tarval_shr(all_one, shift_amount), shift_amount);
						long       val          = get_tarval_long(shift_amount);

						if (and_mask == shift_mask && val >= 0 && val <= 3) {
							addr->variant = X86_ADDR_BASE_INDEX;
							addr->base    = shr;
							addr->index   = shr;
							addr->scale   = val;
							return;
						}
					}
				}
			}
		}

		if (left != NULL) {
			ir_node *base = addr->base;
			if (base == NULL) {
				addr->variant = addr->index != NULL ? X86_ADDR_BASE_INDEX
				                                    : X86_ADDR_BASE;
				addr->base    = left;
			} else {
				addr->variant = X86_ADDR_BASE_INDEX;
				assert(addr->index == NULL && addr->scale == 0);
				assert(right == NULL);
				/* esp must be used as base */
				if (is_Proj(left) && is_Start(get_Proj_pred(left))) {
					addr->index = base;
					addr->base  = left;
				} else {
					addr->index = left;
				}
			}
		}
		if (right != NULL) {
			ir_node *base = addr->base;
			if (base == NULL) {
				addr->variant = addr->index != NULL ? X86_ADDR_BASE_INDEX
				                                    : X86_ADDR_BASE;
				addr->base    = right;
			} else {
				addr->variant = X86_ADDR_BASE_INDEX;
				assert(addr->index == NULL && addr->scale == 0);
				/* esp must be used as base */
				if (is_Proj(right) && is_Start(get_Proj_pred(right))) {
					addr->index = base;
					addr->base  = right;
				} else {
					addr->index = right;
				}
			}
		}
		return;
	}

	addr->variant = X86_ADDR_BASE;
	addr->base    = node;
}
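A recurring idiom in the examples above is to check is_Const() before calling
get_Const_tarval(), and to check tarval_is_long() before converting the tarval
to a plain integer with get_tarval_long(). A minimal sketch of that idiom; the
helper name is illustrative and not part of libfirm:

#include <stdbool.h>
#include <libfirm/firm.h>

/* Extract a small integer constant from a node, or fail. */
static bool get_const_long(ir_node *node, long *out)
{
	if (!is_Const(node))
		return false;               /* not a constant node */
	ir_tarval *tv = get_Const_tarval(node);
	if (!tarval_is_long(tv))
		return false;               /* value does not fit into a long */
	*out = get_tarval_long(tv);
	return true;
}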