Esempio n. 1
0
int is_irn_const_expression(ir_node *n)
{
	/* A non-fragile binary operation is constant iff both of its operands
	 * are.  Fragile ops are excluded because they may raise an exception.
	 * TODO: be more precise, e.g. a Div will NOT raise if the divisor is
	 * known to be != 0. */
	if (is_binop(n) && !is_fragile_op(n)) {
		return is_irn_const_expression(get_binop_left(n))
		    && is_irn_const_expression(get_binop_right(n));
	}

	/* A Conv only changes the mode; look through it. */
	if (get_irn_opcode(n) == iro_Conv)
		return is_irn_const_expression(get_irn_n(n, 0));

	/* Leaf nodes that are trivially constant. */
	switch (get_irn_opcode(n)) {
	case iro_Const:
	case iro_SymConst:
	case iro_Unknown:
		return 1;
	default:
		return 0;
	}
}
Esempio n. 2
0
/// binoprhs: ([+*/^-] primary)*
///
/// Precedence-climbing parser for the right-hand side of a binary
/// expression.  @a lhs is the already-parsed left operand, @a expr_prec is
/// the minimal operator precedence this invocation is allowed to consume.
/// Operands joined by a run of compatible operators ('+'/'-' mixes are
/// handled by negating operands) are collected in @c args and combined by
/// a single make_binop_expr() call, to keep eval()/ctor overhead low.
ex parser::parse_binop_rhs(int expr_prec, ex& lhs)
{
	// Operands of the pending operator run; the run's lhs is args[0].
	exvector args;
	args.push_back(lhs);
	// binop: operator just seen; orig_binop: operator of the pending run.
	int binop = -1, orig_binop = -1;
	// True when '+' and '-' are mixed in one run: the next rhs is negated
	// so the whole run can still be built as one addition.
	bool need_sign_flip = false;
	while (1) {
		// check if this is a binop
		if (!is_binop(token)) {
			// Not an operator: fold the pending run (if any) and return.
			if (args.size() > 1)
				return make_binop_expr(orig_binop, args);
			else
				return lhs;
		}
		
		// Okay, we know this is a binop.
		if (args.size() == 1)
			orig_binop = token;

		binop = token;

		// If this is a binop that binds at least as tightly as
		// the current binop, consume it, otherwise we are done.
		int tok_prec = get_tok_prec(token);
		if (tok_prec < expr_prec) {
			if (args.size() > 1)
				return make_binop_expr(orig_binop, args);
			else 
				return lhs;
		}

		get_next_tok();  // eat binop

		// Parse the primary expression after the binary operator.
		ex rhs = parse_primary();

		// If binop binds less tightly with rhs than the operator after
		// rhs, let the pending operator take rhs as its lhs.
		int next_prec = get_tok_prec(token);
		if (tok_prec < next_prec)
			rhs = parse_binop_rhs(tok_prec + 1, rhs);

		// previous operator was '+', and current one is '-'
		// (or vice versa): negate the operand just parsed.
		if (need_sign_flip)
			rhs = - rhs;

		args.push_back(rhs);

		// Minimize the number of eval() and ctor calls. This is
		// crucial for a reasonable performance. If the next operator
		// is compatible with the pending one (or the same) don't create
		// the expression and continue collecting operands instead.
		if (binop == token)
			continue;
		else if (binop == '+' && token == '-') {
			// flip only if the run itself is not already a '-' run
			need_sign_flip = token != orig_binop;
			continue;
		} else if (binop == '-' && token == '+') {
			need_sign_flip = token != orig_binop;
			continue;
		} else { 
			// Incompatible operator follows: fold the operands collected
			// so far into one expression and start a new run with it.
			if (args.size() <= 1)
				bug("binop has " << args.size() << " arguments, expected >= 2");
			lhs = make_binop_expr(orig_binop, args);
			args.clear();
			args.push_back(lhs);
		}
	}
}
Esempio n. 3
0
/**
 * Compute the weight of a method parameter.
 *
 * Walks all users (out edges) of @p arg and accumulates a heuristic weight
 * estimating how profitable it would be to know this parameter as a
 * constant: indirect calls through it, compares, Conds and binops each
 * contribute a fixed amount.  Recurses through Id, Tuple/Proj, const-binop
 * and unary users; visited nodes are marked to avoid endless recursion.
 *
 * @param arg  The node representing the parameter whose weight must be
 *             computed.
 */
static unsigned calc_method_param_weight(ir_node *arg)
{
	/* We mark the nodes to avoid endless recursion */
	mark_irn_visited(arg);

	unsigned weight = null_weight;
	/* iterate over all users of the argument */
	for (int i = get_irn_n_outs(arg); i-- > 0; ) {
		ir_node *succ = get_irn_out(arg, i);
		if (irn_visited(succ))
			continue;

		/* We should not walk over the memory edge.*/
		if (get_irn_mode(succ) == mode_M)
			continue;

		switch (get_irn_opcode(succ)) {
		case iro_Call:
			if (get_Call_ptr(succ) == arg) {
				/* the argument is used as the pointer input of a call;
				   we can probably change an indirect Call into a direct one. */
				weight += indirect_call_weight;
			}
			break;
		case iro_Cmp: {
			/* We have reached a Cmp; increase the weight, more if the
			   other operand is constant-like. */
			ir_node *op;
			if (get_Cmp_left(succ) == arg)
				op = get_Cmp_right(succ);
			else
				op = get_Cmp_left(succ);

			if (is_irn_constlike(op)) {
				weight += const_cmp_weight;
			} else
				weight += cmp_weight;
			break;
		}
		case iro_Cond:
			/* the argument is used for a SwitchCond, a big win */
			weight += const_cmp_weight * get_irn_n_outs(succ);
			break;
		case iro_Id:
			/* when looking backward we might find Id nodes */
			weight += calc_method_param_weight(succ);
			break;
		case iro_Tuple:
			/* unoptimized tuple: follow the argument through the
			   matching Proj and recurse */
			for (int j = get_Tuple_n_preds(succ); j-- > 0; ) {
				ir_node *pred = get_Tuple_pred(succ, j);
				if (pred == arg) {
					/* look for Proj(j) */
					for (int k = get_irn_n_outs(succ); k-- > 0; ) {
						ir_node *succ_succ = get_irn_out(succ, k);
						if (is_Proj(succ_succ)) {
							if (get_Proj_proj(succ_succ) == j) {
								/* found */
								weight += calc_method_param_weight(succ_succ);
							}
						} else {
							/* this should NOT happen */
						}
					}
				}
			}
			break;
		default:
			if (is_binop(succ)) {
				/* We have reached a BinOp and we must increase the
				   weight with the binop_weight. If the other operand of the
				   BinOp is a constant we increase the weight with const_binop_weight
				   and call the function recursively.
				 */
				ir_node *op;
				if (get_binop_left(succ) == arg)
					op = get_binop_right(succ);
				else
					op = get_binop_left(succ);

				if (is_irn_constlike(op)) {
					weight += const_binop_weight;
					weight += calc_method_param_weight(succ);
				} else
					weight += binop_weight;
			} else if (get_irn_arity(succ) == 1) {
				/* We have reached a unary op (arity 1); increase the
				   weight with const_binop_weight and recurse, since a
				   constant operand stays constant through it. */
				weight += const_binop_weight;
				weight += calc_method_param_weight(succ);
			}
			break;
		}
	}
	/* NOTE(review): the recursion marker above is set via
	   mark_irn_visited(), yet only the link field is cleared here —
	   looks like a leftover from an older link-based marking scheme;
	   presumably the caller resets the visited flags.  TODO confirm. */
	set_irn_link(arg, NULL);
	return weight;
}