Example #1
/**
 * Count the DAGs of a graph and their sizes.
 *
 * @param global  the global entry
 * @param graph   the current graph entry
 */
void count_dags_in_graph(graph_entry_t *global, graph_entry_t *graph)
{
	dag_env_t   root_env;
	dag_entry_t *entry;
	unsigned id;
	(void) global;

	/* do NOT check the const code irg */
	if (graph->irg == get_const_code_irg())
		return;

	/* first step, clear the links */
	irg_walk_graph(graph->irg, firm_clear_link, NULL, NULL);

	obstack_init(&root_env.obst);
	root_env.num_of_dags  = 0;
	root_env.list_of_dags = NULL;
	root_env.options      = FIRMSTAT_COPY_CONSTANTS | FIRMSTAT_LOAD_IS_LEAVE | FIRMSTAT_CALL_IS_LEAVE;

	/* find the DAG roots that are referenced from other blocks */
	irg_walk_graph(graph->irg, NULL, find_dag_roots, &root_env);

	/* connect and count them */
	irg_walk_graph(graph->irg, connect_dags, NULL, &root_env);

	printf("Graph %p %s --- %u\n", (void *)graph->irg, get_entity_name(get_irg_entity(graph->irg)),
		root_env.num_of_dags);

	for (id = 0, entry = root_env.list_of_dags; entry; entry = entry->next) {
		if (entry->is_dead)
			continue;
		entry->id = id++;

		printf("number of roots %u number of nodes %u inner %u tree %u %ld\n",
			entry->num_roots,
			entry->num_nodes,
			entry->num_inner_nodes,
			(unsigned)entry->is_tree,
			get_irn_node_nr(entry->root));
	}

	/* dump for test */
	mark_options = root_env.options;
	set_dump_node_vcgattr_hook(stat_dag_mark_hook);
	dump_ir_graph(graph->irg, "dag");
	set_dump_node_vcgattr_hook(NULL);

	assert(id == root_env.num_of_dags);

	obstack_free(&root_env.obst, NULL);
}
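All of these examples drive their work through irg_walk_graph(irg, pre, post, env), which visits every node of a graph exactly once, calling pre before and post after a node's predecessors are processed; both callbacks have libFirm's irg_walk_func signature void(ir_node *, void *). A minimal sketch of such a callback (the counting body is a hypothetical illustration, not the real find_dag_roots):

/* sketch: a post-walker that counts visited nodes via the env pointer */
static void count_node(ir_node *node, void *env)
{
	(void)node;  /* only the visit itself matters here */
	unsigned *counter = (unsigned*)env;
	++*counter;
}

/* usage: no pre-handler, count_node as post-handler */
unsigned n_nodes = 0;
irg_walk_graph(irg, NULL, count_node, &n_nodes);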
Example #2
void lower_CopyB(ir_graph *irg, unsigned max_small_sz, unsigned min_large_sz,
                 int allow_misaligns)
{
	const backend_params *bparams = be_get_backend_param();

	assert(max_small_sz < min_large_sz && "CopyB size ranges must not overlap");

	max_small_size      = max_small_sz;
	min_large_size      = min_large_sz;
	native_mode_bytes   = bparams->machine_size / 8;
	allow_misalignments = allow_misaligns;

	walk_env_t env = { .copybs = NEW_ARR_F(ir_node*, 0) };
	irg_walk_graph(irg, NULL, find_copyb_nodes, &env);

	bool changed = false;
	for (size_t i = 0, n = ARR_LEN(env.copybs); i != n; ++i) {
		lower_copyb_node(env.copybs[i]);
		changed = true;
	}
	confirm_irg_properties(irg, changed ? IR_GRAPH_PROPERTIES_CONTROL_FLOW
	                                    : IR_GRAPH_PROPERTIES_ALL);

	DEL_ARR_F(env.copybs);
}
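The collect-then-transform pattern above uses libFirm's flexible arrays (NEW_ARR_F, ARR_LEN, DEL_ARR_F). A plausible sketch of the collecting walker, assuming walk_env_t holds nothing but the copybs array (the real find_copyb_nodes may apply further filters):

static void find_copyb_nodes(ir_node *node, void *ctx)
{
	walk_env_t *env = (walk_env_t*)ctx;
	if (is_CopyB(node))
		ARR_APP1(ir_node*, env->copybs, node); /* append to flexible array */
}

Collecting first and lowering afterwards avoids mutating the graph while the walker is still iterating over it.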
Example #3
void oo_devirtualize_local(ir_graph *irg)
{
	//dump_ir_graph(irg, "--before");

	rta_init();

	init_irtypeinfo();
	set_irg_typeinfo_state(irg, ir_typeinfo_consistent);

	compute_inh_transitive_closure();

	bool changed;
	do {
		changed = false;
		irg_walk_graph(irg, infer_typeinfo_walker, NULL, (void*)&changed);
	} while (changed);

	//dump_ir_graph(irg, "--typeinfo");

	set_opt_dyn_meth_dispatch(1);

	transform_node_func oldfunc = get_op_ops(get_op_Sel())->transform_node;
	((ir_op_ops*)get_op_ops(get_op_Sel()))->transform_node = transform_node_Sel2;
	local_optimize_graph(irg);

	((ir_op_ops*)get_op_ops(get_op_Sel()))->transform_node = oldfunc; // restore

	free_irtypeinfo();

	rta_cleanup();

	//dump_ir_graph(irg, "--devirtualize-local");
}
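The do/while loop runs the type-inference walker to a fixed point: a pass sets the shared changed flag whenever it refines a node's type information, and iteration stops after the first pass that changes nothing. A skeleton of such a walker (the actual inference logic of infer_typeinfo_walker is elided; the typeinfo accessors are libFirm's irtypeinfo API):

static void infer_typeinfo_walker(ir_node *node, void *ctx)
{
	bool *changed = (bool*)ctx;
	ir_type *inferred = NULL; /* hypothetical: derive a more precise type */
	if (inferred != NULL && inferred != get_irn_typeinfo_type(node)) {
		set_irn_typeinfo_type(node, inferred);
		*changed = true;
	}
}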
Example #4
void amd64_adjust_pic(ir_graph *irg)
{
	switch (be_options.pic_style) {
	case BE_PIC_NONE:
		return;
	case BE_PIC_ELF_PLT:
		irg_walk_graph(irg, fix_address_pic_elf, NULL, NULL);
		break;
	case BE_PIC_ELF_NO_PLT:
		panic("amd64 elf/no-plt not implemented yet");
	case BE_PIC_MACH_O:
		irg_walk_graph(irg, fix_address_pic_mach_o, NULL, NULL);
		break;
	}
	be_dump(DUMP_BE, irg, "pic");
}
Example #5
void lower_alloc(ir_graph *irg, unsigned new_stack_alignment, bool lower_consts,
                 long new_addr_delta)
{
	if (!is_po2(new_stack_alignment))
		panic("stack alignment not a power of 2");
	addr_delta           = new_addr_delta;
	stack_alignment      = new_stack_alignment;
	lower_constant_sizes = lower_consts;
	ir_nodeset_init(&transformed);
	irg_walk_graph(irg, lower_alloca_free, NULL, NULL);
	ir_nodeset_destroy(&transformed);
}
Example #6
void lower_alloc(ir_graph *irg, unsigned new_po2_stack_alignment)
{
	if (new_po2_stack_alignment == 0)
		return;

	po2_stack_alignment = new_po2_stack_alignment;
	bool changed = false;
	irg_walk_graph(irg, NULL, lower_node, &changed);

	confirm_irg_properties(irg, changed ? IR_GRAPH_PROPERTIES_CONTROL_FLOW
	                                    : IR_GRAPH_PROPERTIES_ALL);
}
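As in Examples #2 and #7, confirm_irg_properties reports which graph properties survived the pass: if nothing changed, IR_GRAPH_PROPERTIES_ALL keeps every previously computed analysis valid, while a real change restricts the valid set to the stated properties (here the control-flow ones).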
Example #7
void remove_tuples(ir_graph *irg)
{
	bool changed = false;
	irg_walk_graph(irg, exchange_tuple_projs, NULL, &changed);

	/* remove Tuples only held by keep-alive edges */
	ir_node *end = get_irg_end(irg);
	for (int i = get_End_n_keepalives(end); i-- > 0; ) {
		ir_node *irn = get_End_keepalive(end, i);
		if (is_Tuple(irn)) {
			remove_End_n(end, i);
			changed = true;
		}
	}

	confirm_irg_properties(irg, changed
			? IR_GRAPH_PROPERTIES_CONTROL_FLOW | IR_GRAPH_PROPERTY_ONE_RETURN
			  | IR_GRAPH_PROPERTY_MANY_RETURNS | IR_GRAPH_PROPERTY_NO_BADS
			: IR_GRAPH_PROPERTIES_ALL);
	add_irg_properties(irg, IR_GRAPH_PROPERTY_NO_TUPLES);
}
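For comparison, a walker like exchange_tuple_projs typically rewires every Proj of a Tuple to the Tuple input it selects. A minimal sketch (the body is an assumption; the Proj index accessor is named get_Proj_proj in older libFirm versions and get_Proj_num in newer ones):

static void exchange_tuple_projs(ir_node *node, void *ctx)
{
	bool *changed = (bool*)ctx;
	if (!is_Proj(node))
		return;
	ir_node *pred = get_Proj_pred(node);
	if (!is_Tuple(pred))
		return;
	/* replace the Proj by the corresponding Tuple operand */
	exchange(node, get_Tuple_pred(pred, get_Proj_proj(node)));
	*changed = true;
}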
Example #8
calling_convention_t *sparc_decide_calling_convention(ir_type *function_type,
                                                      ir_graph *irg)
{
	bool omit_fp = false;
	if (irg != NULL) {
		omit_fp = be_options.omit_fp;
		/* our current va_arg handling needs the standard space to store
		 * args 0-5 */
		if (is_method_variadic(function_type))
			omit_fp = false;
		/* The pointer to the aggregate return value belongs to the 92
		 * magic bytes. Thus, if the called function increases the stack
		 * size, it must copy the value to the appropriate location.
		 * This is not implemented yet, so we forbid omitting the frame
		 * pointer. */
		if (get_method_calling_convention(function_type) & cc_compound_ret)
			omit_fp = false;
		if (omit_fp)
			irg_walk_graph(irg, check_omit_fp, NULL, &omit_fp);
		sparc_get_irg_data(irg)->omit_fp = omit_fp;
	}

	mtp_additional_properties mtp
		= get_method_additional_properties(function_type);
	unsigned *caller_saves = rbitset_malloc(N_SPARC_REGISTERS);
	if (mtp & mtp_property_returns_twice) {
		rbitset_copy(caller_saves, default_returns_twice_saves,
		             N_SPARC_REGISTERS);
	} else {
		rbitset_copy(caller_saves, default_caller_saves, N_SPARC_REGISTERS);
	}

	/* determine how parameters are passed */
	int                 n_params = get_method_n_params(function_type);
	int                 regnum   = 0;
	reg_or_stackslot_t *params   = XMALLOCNZ(reg_or_stackslot_t, n_params);

	int      n_param_regs = ARRAY_SIZE(param_regs);
	unsigned stack_offset = !omit_fp ? SPARC_MIN_STACKSIZE : 0;
	for (int i = 0; i < n_params; ++i) {
		ir_type            *param_type = get_method_param_type(function_type,i);
		ir_mode            *mode;
		int                 bits;
		reg_or_stackslot_t *param;

		if (is_compound_type(param_type))
			panic("compound arguments not supported yet");

		mode  = get_type_mode(param_type);
		bits  = get_mode_size_bits(mode);
		param = &params[i];

		if (i == 0 &&
		    (get_method_calling_convention(function_type) & cc_compound_ret)) {
			assert(mode_is_reference(mode) && bits == 32);
			/* special case: we have reserved space for this in the
			 * between type */
			param->type   = param_type;
			param->offset = SPARC_AGGREGATE_RETURN_OFFSET;
			param->already_stored = true;
			continue;
		}

		if (regnum < n_param_regs) {
			param->offset = SPARC_PARAMS_SPILL_OFFSET
			                + regnum * SPARC_REGISTER_SIZE;
			param->type   = param_type;
			arch_register_t const *reg = param_regs[regnum++];
			if (irg == NULL || omit_fp)
				reg = map_i_to_o_reg(reg);
			param->reg0 = reg;
			param->req0 = reg->single_req;
		} else {
			param->type   = param_type;
			param->offset = stack_offset;
			param->already_stored = true;
			/* increase offset by at least SPARC_REGISTER_SIZE bytes so
			 * everything is aligned */
			stack_offset += MAX(bits / 8, SPARC_REGISTER_SIZE);
			continue;
		}

		/* we might need a 2nd 32bit component (for 64bit or double values) */
		if (bits > 32) {
			if (bits > 64)
				panic("only 32 and 64bit modes supported");

			if (regnum < n_param_regs) {
				param->offset = SPARC_PARAMS_SPILL_OFFSET
				                + regnum * SPARC_REGISTER_SIZE;
				arch_register_t const *reg = param_regs[regnum++];
				if (irg == NULL || omit_fp)
					reg = map_i_to_o_reg(reg);
				param->reg1 = reg;
				param->req1 = reg->single_req;
			} else {
				ir_mode *regmode = param_regs[0]->cls->mode;
				ir_type *type    = get_type_for_mode(regmode);
				param->type      = type;
				param->offset    = stack_offset;
				assert(get_mode_size_bits(regmode) == 32);
				stack_offset += SPARC_REGISTER_SIZE;
			}
		}
	}
	unsigned n_param_regs_used = regnum;

	/* determine how results are passed */
	int                 n_results           = get_method_n_ress(function_type);
	unsigned            float_regnum        = 0;
	unsigned            n_reg_results       = 0;
	unsigned            n_float_result_regs = ARRAY_SIZE(float_result_regs);
	reg_or_stackslot_t *results = XMALLOCNZ(reg_or_stackslot_t, n_results);
	regnum        = 0;
	for (int i = 0; i < n_results; ++i) {
		ir_type            *result_type = get_method_res_type(function_type, i);
		ir_mode            *result_mode = get_type_mode(result_type);
		reg_or_stackslot_t *result      = &results[i];

		if (mode_is_float(result_mode)) {
			unsigned n_regs   = determine_n_float_regs(result_mode);
			unsigned next_reg = round_up2(float_regnum, n_regs);

			if (next_reg >= n_float_result_regs) {
				panic("too many float results");
			} else {
				const arch_register_t *reg = float_result_regs[next_reg];
				rbitset_clear(caller_saves, reg->global_index);
				if (n_regs == 1) {
					result->req0 = reg->single_req;
				} else if (n_regs == 2) {
					result->req0 = &float_result_reqs_double[next_reg];
					rbitset_clear(caller_saves, reg->global_index+1);
				} else if (n_regs == 4) {
					result->req0 = &float_result_reqs_quad[next_reg];
					rbitset_clear(caller_saves, reg->global_index+1);
					rbitset_clear(caller_saves, reg->global_index+2);
					rbitset_clear(caller_saves, reg->global_index+3);
				} else {
					panic("invalid number of registers in result");
				}
				float_regnum = next_reg + n_regs;

				++n_reg_results;
			}
		} else {
			if (get_mode_size_bits(result_mode) > 32) {
				panic("results with more than 32bits not supported yet");
			}

			if (regnum >= n_param_regs) {
				panic("too many results");
			} else {
				const arch_register_t *reg = param_regs[regnum++];
				if (irg == NULL || omit_fp)
					reg = map_i_to_o_reg(reg);
				result->req0 = reg->single_req;
				rbitset_clear(caller_saves, reg->global_index);
				++n_reg_results;
			}
		}
	}

	calling_convention_t *cconv = XMALLOCZ(calling_convention_t);
	cconv->n_parameters     = n_params;
	cconv->parameters       = params;
	cconv->param_stack_size = stack_offset - SPARC_MIN_STACKSIZE;
	cconv->n_param_regs     = n_param_regs_used;
	cconv->results          = results;
	cconv->omit_fp          = omit_fp;
	cconv->caller_saves     = caller_saves;
	cconv->n_reg_results    = n_reg_results;

	/* setup ignore register array */
	if (irg != NULL) {
		be_irg_t *birg = be_birg_from_irg(irg);

		birg->allocatable_regs = be_cconv_alloc_all_regs(&birg->obst, N_SPARC_REGISTERS);
		be_cconv_rem_regs(birg->allocatable_regs, ignore_regs, ARRAY_SIZE(ignore_regs));
	}

	return cconv;
}
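A hypothetical usage sketch: the function type passed to sparc_decide_calling_convention is normally the type of the graph's entity.

ir_entity *entity   = get_irg_entity(irg);
ir_type   *fun_type = get_entity_type(entity);
calling_convention_t *cconv
	= sparc_decide_calling_convention(fun_type, irg);
/* ... consult cconv->parameters / cconv->results during code selection ... */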
Example #9
	/* Compute the call graph */
	foreach_irp_irg(i, irg) {
		construct_cf_backedges(irg);   // We also find the maximal loop depth of a call.
		irg_walk_graph(irg, ana_Call, NULL, NULL);
	}
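foreach_irp_irg is a convenience macro over the ir-program; the loop above is essentially equivalent to this sketch using the public irp accessors:

for (size_t i = 0, n = get_irp_n_irgs(); i < n; ++i) {
	ir_graph *irg = get_irp_irg(i);
	construct_cf_backedges(irg);   // We also find the maximal loop depth of a call.
	irg_walk_graph(irg, ana_Call, NULL, NULL);
}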
Example #10
void ia32_adjust_pic(ir_graph *irg)
{
	if (ia32_pic_style == IA32_PIC_NONE)
		return;
	irg_walk_graph(irg, fix_pic_addresses, NULL, NULL);
}
Example #11
/**
 * Maps all intrinsic calls that the backend supports,
 * and maps all instructions the backend does not support
 * to runtime calls.
 */
static void arm_handle_intrinsics(ir_graph *irg)
{
	arm_create_runtime_entities();
	irg_walk_graph(irg, handle_intrinsic, NULL, NULL);
}
Example #12
x86_cconv_t *ia32_decide_calling_convention(ir_type *function_type,
                                            ir_graph *irg)
{
	bool omit_fp = false;
	if (irg != NULL) {
		omit_fp = be_options.omit_fp;
		if (omit_fp)
			irg_walk_graph(irg, check_omit_fp, NULL, &omit_fp);
	}

	mtp_additional_properties mtp
		= get_method_additional_properties(function_type);
	(void)mtp;
	/* TODO: do something with cc_reg_param/cc_this_call */

	unsigned *caller_saves = rbitset_malloc(N_IA32_REGISTERS);
	unsigned *callee_saves = rbitset_malloc(N_IA32_REGISTERS);
	rbitset_copy(caller_saves, default_caller_saves, N_IA32_REGISTERS);
	rbitset_copy(callee_saves, default_callee_saves, N_IA32_REGISTERS);

	/* determine how parameters are passed */
	unsigned            n_params           = get_method_n_params(function_type);
	unsigned            param_regnum       = 0;
	unsigned            float_param_regnum = 0;
	reg_or_stackslot_t *params             = XMALLOCNZ(reg_or_stackslot_t,
	                                                   n_params);

	unsigned n_param_regs       = ARRAY_SIZE(default_param_regs);
	unsigned n_float_param_regs = ARRAY_SIZE(float_param_regs);
	unsigned stack_offset       = 0;
	for (unsigned i = 0; i < n_params; ++i) {
		ir_type            *param_type = get_method_param_type(function_type, i);
		reg_or_stackslot_t *param      = &params[i];
		if (is_aggregate_type(param_type)) {
			param->type   = param_type;
			param->offset = stack_offset;
			stack_offset += get_type_size_bytes(param_type);
			goto align_stack;
		}

		ir_mode *mode = get_type_mode(param_type);
		if (mode_is_float(mode) && float_param_regnum < n_float_param_regs) {
			param->reg = float_param_regs[float_param_regnum++];
		} else if (!mode_is_float(mode) && param_regnum < n_param_regs) {
			param->reg = default_param_regs[param_regnum++];
		} else {
			param->type   = param_type;
			param->offset = stack_offset;
			stack_offset += get_type_size_bytes(param_type);
align_stack:;
			/* increase offset by at least IA32_REGISTER_SIZE bytes so
			 * everything is aligned */
			unsigned misalign = stack_offset % IA32_REGISTER_SIZE;
			if (misalign > 0)
				stack_offset += IA32_REGISTER_SIZE - misalign;
		}
	}

	unsigned n_param_regs_used = param_regnum + float_param_regnum;

	/* determine how results are passed */
	unsigned            n_results           = get_method_n_ress(function_type);
	unsigned            n_reg_results       = 0;
	reg_or_stackslot_t *results = XMALLOCNZ(reg_or_stackslot_t, n_results);
	unsigned            res_regnum          = 0;
	unsigned            res_float_regnum    = 0;
	unsigned            n_result_regs       = ARRAY_SIZE(result_regs);
	unsigned            n_float_result_regs = ARRAY_SIZE(float_result_regs);
	for (size_t i = 0; i < n_results; ++i) {
		ir_type            *result_type = get_method_res_type(function_type, i);
		ir_mode            *result_mode = get_type_mode(result_type);
		reg_or_stackslot_t *result      = &results[i];

		const arch_register_t *reg;
		if (mode_is_float(result_mode)) {
			if (res_float_regnum >= n_float_result_regs) {
				panic("too many floating points results");
			}
			reg = float_result_regs[res_float_regnum++];
		} else {
			if (res_regnum >= n_result_regs) {
				panic("too many results");
			}
			reg = result_regs[res_regnum++];
		}
		result->reg = reg;
		rbitset_clear(caller_saves, reg->global_index);
		++n_reg_results;
	}

	calling_convention cc = get_method_calling_convention(function_type);

	x86_cconv_t *cconv    = XMALLOCZ(x86_cconv_t);
	cconv->sp_delta       = (cc & cc_compound_ret) && !(cc & cc_reg_param)
	                        ? IA32_REGISTER_SIZE : 0;
	cconv->parameters     = params;
	cconv->n_parameters   = n_params;
	cconv->callframe_size = stack_offset;
	cconv->n_param_regs   = n_param_regs_used;
	cconv->n_xmm_regs     = float_param_regnum;
	cconv->results        = results;
	cconv->omit_fp        = omit_fp;
	cconv->caller_saves   = caller_saves;
	cconv->callee_saves   = callee_saves;
	cconv->n_reg_results  = n_reg_results;

	if (irg != NULL) {
		be_irg_t       *birg      = be_birg_from_irg(irg);
		size_t          n_ignores = ARRAY_SIZE(ignore_regs);
		struct obstack *obst      = &birg->obst;

		birg->allocatable_regs = rbitset_obstack_alloc(obst, N_IA32_REGISTERS);
		rbitset_set_all(birg->allocatable_regs, N_IA32_REGISTERS);
		for (size_t r = 0; r < n_ignores; ++r) {
			rbitset_clear(birg->allocatable_regs, ignore_regs[r]);
		}
		if (!omit_fp)
			rbitset_clear(birg->allocatable_regs, REG_EBP);
	}

	return cconv;
}
Example #13
	foreach_irp_irg(i, irg) {
		irg_walk_graph(irg, pre, post, env);
	}
Example #14
x86_cconv_t *amd64_decide_calling_convention(ir_type *function_type,
                                             ir_graph *irg)
{
	bool omit_fp = false;
	if (irg != NULL) {
		omit_fp = be_options.omit_fp;
		if (omit_fp)
			irg_walk_graph(irg, check_omit_fp, NULL, &omit_fp);
		amd64_get_irg_data(irg)->omit_fp = omit_fp;
	}

	unsigned *caller_saves = rbitset_malloc(N_AMD64_REGISTERS);
	unsigned *callee_saves = rbitset_malloc(N_AMD64_REGISTERS);
	rbitset_copy(caller_saves, default_caller_saves, N_AMD64_REGISTERS);
	rbitset_copy(callee_saves, default_callee_saves, N_AMD64_REGISTERS);

	/* determine how parameters are passed */
	size_t              n_params           = get_method_n_params(function_type);
	size_t              param_regnum       = 0;
	size_t              float_param_regnum = 0;
	reg_or_stackslot_t *params             = XMALLOCNZ(reg_or_stackslot_t,
	                                                   n_params);
	/* The x64 ABI always reserves space to spill the first 4 arguments,
	 * which simplifies handling of variadic functions. */
	unsigned stack_offset = amd64_use_x64_abi ? 32 : 0;
	for (size_t i = 0; i < n_params; ++i) {
		ir_type *param_type = get_method_param_type(function_type,i);
		if (is_compound_type(param_type))
			panic("compound arguments NIY");

		ir_mode *mode = get_type_mode(param_type);
		int      bits = get_mode_size_bits(mode);
		reg_or_stackslot_t *param = &params[i];

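		/* Note: on the Windows x64 ABI, integer and XMM argument registers
		 * share position slots, which is why taking a register from one
		 * class below also advances the other class's counter. */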
		if (mode_is_float(mode) && float_param_regnum < n_float_param_regs
		    && mode != x86_mode_E) {
			param->reg = float_param_regs[float_param_regnum++];
			if (amd64_use_x64_abi) {
				++param_regnum;
			}
		} else if (!mode_is_float(mode) && param_regnum < n_param_regs) {
			param->reg = param_regs[param_regnum++];
			if (amd64_use_x64_abi) {
				++float_param_regnum;
			}
		} else {
			param->type   = param_type;
			param->offset = stack_offset;
			/* increase offset by at least AMD64_REGISTER_SIZE bytes so
			 * everything is aligned */
			stack_offset += round_up2(bits / 8, AMD64_REGISTER_SIZE);
		}
	}

	/* If the function is variadic, we add all unused parameter
	 * passing registers to the end of the params array, first GP,
	 * then XMM. */
	if (irg && is_method_variadic(function_type)) {
		if (amd64_use_x64_abi) {
			panic("Variadic functions on Windows ABI not supported");
		}

		int params_remaining = (n_param_regs - param_regnum) +
			(n_float_param_regs - float_param_regnum);
		params = XREALLOC(params, reg_or_stackslot_t, n_params + params_remaining);
		size_t i = n_params;

		for (; param_regnum < n_param_regs; param_regnum++, i++) {
			params[i].reg = param_regs[param_regnum];
		}

		for (; float_param_regnum < n_float_param_regs; float_param_regnum++, i++) {
			params[i].reg = float_param_regs[float_param_regnum];
		}
	}

	unsigned n_param_regs_used
		= amd64_use_x64_abi ? param_regnum : param_regnum + float_param_regnum;

	/* determine how results are passed */
	size_t              n_results           = get_method_n_ress(function_type);
	unsigned            n_reg_results       = 0;
	reg_or_stackslot_t *results = XMALLOCNZ(reg_or_stackslot_t, n_results);
	unsigned            res_regnum          = 0;
	unsigned            res_float_regnum    = 0;
	unsigned            res_x87_regnum      = 0;
	size_t              n_result_regs       = ARRAY_SIZE(result_regs);
	size_t              n_float_result_regs = ARRAY_SIZE(float_result_regs);
	size_t              n_x87_result_regs   = ARRAY_SIZE(x87_result_regs);
	for (size_t i = 0; i < n_results; ++i) {
		ir_type            *result_type = get_method_res_type(function_type, i);
		ir_mode            *result_mode = get_type_mode(result_type);
		reg_or_stackslot_t *result      = &results[i];

		const arch_register_t *reg;
		if (result_mode == x86_mode_E) {
			if (res_x87_regnum >= n_x87_result_regs)
				panic("too manu x87 floating point results");
			reg = x87_result_regs[res_x87_regnum++];
		} else if (mode_is_float(result_mode)) {
			if (res_float_regnum >= n_float_result_regs) {
				panic("too many floating points results");
			}
			reg = float_result_regs[res_float_regnum++];
		} else {
			if (res_regnum >= n_result_regs) {
				panic("too many results");
			}
			reg = result_regs[res_regnum++];
		}
		result->reg = reg;
		rbitset_clear(caller_saves, reg->global_index);
		++n_reg_results;
	}

	x86_cconv_t *cconv     = XMALLOCZ(x86_cconv_t);
	cconv->parameters      = params;
	cconv->n_parameters    = n_params;
	cconv->param_stacksize = stack_offset;
	cconv->n_param_regs    = n_param_regs_used;
	cconv->n_xmm_regs      = float_param_regnum;
	cconv->results         = results;
	cconv->omit_fp         = omit_fp;
	cconv->caller_saves    = caller_saves;
	cconv->callee_saves    = callee_saves;
	cconv->n_reg_results   = n_reg_results;

	if (irg != NULL) {
		be_irg_t *birg = be_birg_from_irg(irg);

		birg->allocatable_regs = be_cconv_alloc_all_regs(&birg->obst, N_AMD64_REGISTERS);
		be_cconv_rem_regs(birg->allocatable_regs, ignore_regs, ARRAY_SIZE(ignore_regs));
		if (!omit_fp)
			rbitset_clear(birg->allocatable_regs, REG_RBP);
	}

	return cconv;
}
Example #15
/*
 * Replaces SymConsts by real constants where possible.
 * Replaces Sel nodes by address computations and resolves array accesses.
 * Handles bitfields by adding And/Or calculations.
 */
void lower_highlevel_graph(ir_graph *irg)
{
	/* Finally: lower SymConst-Size and Sel nodes, unaligned Load/Stores. */
	irg_walk_graph(irg, NULL, lower_irnode, NULL);
}
Example #16
x86_cconv_t *amd64_decide_calling_convention(ir_type *function_type,
                                             ir_graph *irg)
{
	bool omit_fp = false;
	if (irg != NULL) {
		omit_fp = be_options.omit_fp;
		if (omit_fp)
			irg_walk_graph(irg, check_omit_fp, NULL, &omit_fp);
	}

	mtp_additional_properties mtp
		= get_method_additional_properties(function_type);
	unsigned *caller_saves = rbitset_malloc(N_AMD64_REGISTERS);
	unsigned *callee_saves = rbitset_malloc(N_AMD64_REGISTERS);
	if (mtp & mtp_property_returns_twice)
		panic("amd64: returns_twice calling convention NIY");
	rbitset_copy(caller_saves, default_caller_saves, N_AMD64_REGISTERS);
	rbitset_copy(callee_saves, default_callee_saves, N_AMD64_REGISTERS);

	/* determine how parameters are passed */
	size_t              n_params           = get_method_n_params(function_type);
	size_t              param_regnum       = 0;
	size_t              float_param_regnum = 0;
	reg_or_stackslot_t *params             = XMALLOCNZ(reg_or_stackslot_t,
	                                                   n_params);
	/* The x64 ABI always reserves space to spill the first 4 arguments,
	 * which simplifies handling of variadic functions. */
	unsigned stack_offset = amd64_use_x64_abi ? 32 : 0;
	for (size_t i = 0; i < n_params; ++i) {
		ir_type *param_type = get_method_param_type(function_type,i);
		if (is_compound_type(param_type))
			panic("amd64: compound arguments NIY");

		ir_mode *mode = get_type_mode(param_type);
		int      bits = get_mode_size_bits(mode);
		reg_or_stackslot_t *param = &params[i];

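		/* On the Windows x64 ABI, integer and XMM argument registers share
		 * position slots, hence the cross-increment of the counters. */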
		if (mode_is_float(mode) && float_param_regnum < n_float_param_regs) {
			param->reg = float_param_regs[float_param_regnum++];
			if (amd64_use_x64_abi)
				++param_regnum;
		} else if (!mode_is_float(mode) && param_regnum < n_param_regs) {
			param->reg = param_regs[param_regnum++];
			if (amd64_use_x64_abi)
				++float_param_regnum;
		} else {
			param->type   = param_type;
			param->offset = stack_offset;
			/* increase offset by at least AMD64_REGISTER_SIZE bytes so
			 * everything is aligned */
			stack_offset += MAX(bits / 8, AMD64_REGISTER_SIZE);
		}
	}

	unsigned n_param_regs_used
		= amd64_use_x64_abi ? param_regnum : param_regnum + float_param_regnum;

	/* determine how results are passed */
	size_t              n_results           = get_method_n_ress(function_type);
	unsigned            n_reg_results       = 0;
	reg_or_stackslot_t *results = XMALLOCNZ(reg_or_stackslot_t, n_results);
	unsigned            res_regnum          = 0;
	unsigned            res_float_regnum    = 0;
	size_t              n_result_regs       = ARRAY_SIZE(result_regs);
	size_t              n_float_result_regs = ARRAY_SIZE(float_result_regs);
	for (size_t i = 0; i < n_results; ++i) {
		ir_type            *result_type = get_method_res_type(function_type, i);
		ir_mode            *result_mode = get_type_mode(result_type);
		reg_or_stackslot_t *result      = &results[i];

		const arch_register_t *reg;
		if (mode_is_float(result_mode)) {
			if (res_float_regnum >= n_float_result_regs) {
				panic("too many floating points results");
			}
			reg = float_result_regs[res_float_regnum++];
		} else {
			if (res_regnum >= n_result_regs) {
				panic("too many results");
			}
			reg = result_regs[res_regnum++];
		}
		result->reg = reg;
		rbitset_clear(caller_saves, reg->global_index);
		++n_reg_results;
	}

	x86_cconv_t *cconv    = XMALLOCZ(x86_cconv_t);
	cconv->parameters     = params;
	cconv->callframe_size = stack_offset;
	cconv->n_param_regs   = n_param_regs_used;
	cconv->n_xmm_regs     = float_param_regnum;
	cconv->results        = results;
	cconv->omit_fp        = omit_fp;
	cconv->caller_saves   = caller_saves;
	cconv->callee_saves   = callee_saves;
	cconv->n_reg_results  = n_reg_results;

	if (irg != NULL) {
		be_irg_t       *birg      = be_birg_from_irg(irg);
		size_t          n_ignores = ARRAY_SIZE(ignore_regs);
		struct obstack *obst      = &birg->obst;

		birg->allocatable_regs = rbitset_obstack_alloc(obst, N_AMD64_REGISTERS);
		rbitset_set_all(birg->allocatable_regs, N_AMD64_REGISTERS);
		for (size_t r = 0; r < n_ignores; ++r) {
			rbitset_clear(birg->allocatable_regs, ignore_regs[r]);
		}
		if (!omit_fp)
			rbitset_clear(birg->allocatable_regs, REG_RBP);
	}

	return cconv;
}