static void write_const(CTX *ctx, struct ir_const_val v)
{
    char *s = const_unparse(NULL, v);
    if (TEST_UNION(IR_TYPE, tarray, &v.type)) {
        fprintf(ctx->f, "{%s}", s);
    } else if (type_is_integer(v.type)) {
        // Get rid of annoying-but-correct gcc warning. This happens because we
        // negate a value not representable in intmax_t, even though the result
        // is representable, i.e. the only case is INT64_MIN.
        if (type_equals(v.type, type_integer(true, 64)) &&
            *GET_UNION(VALUE, vuint64, &v.value) == INT64_MIN)
        {
            fprintf(ctx->f, "INT64_MIN");
        } else {
            fprintf(ctx->f, "%s(%s)", int_const(v.type), s);
        }
    } else if (TEST_UNION(IR_TYPE, tslice, &v.type)) {
        if (TEST_UNION0(VALUE, vempty, &v.value)) {
            fprintf(ctx->f, "{0}");
        } else if (type_equals(v.type, TYPE_STRING)) {
            char *raw = *GET_UNION(VALUE, vstring, &v.value);
            fprintf(ctx->f, "{ %s, %zu }", s, raw ? strlen(raw) : 0);
        } else {
            assert(false);
        }
    } else {
        fprintf(ctx->f, "%s", s);
    }
    talloc_free(s);
}
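A minimal standalone sketch (not taken from the sources above) of the literal quirk that comment refers to: in C, -9223372036854775808 is parsed as unary minus applied to a decimal constant that does not fit in any signed 64-bit type, so compilers warn about it; <stdint.h> typically defines INT64_MIN as an expression like (-INT64_MAX - 1), and write_const sidesteps the issue by emitting the macro name instead of the raw digits.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* int64_t bad = -9223372036854775808;   -- the digits alone overflow a
     * signed 64-bit constant, so this line draws an out-of-range warning. */
    int64_t ok = INT64_MIN;     /* expands to an in-range constant expression */
    printf("%lld\n", (long long) ok);
    return 0;
}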
/* Create a new variable */
static node_ptr new_ast_var(char *name)
{
    node_ptr result = new_node0(E_VAR, IOP_NONE);
    char *sname = strsave(name);
    result->val = int_const(0L);
    result->name = sname;
    return result;
}
/* Make constant node from sizeof declaration. Type node as parameter */
node_ptr sizeof_node(node_ptr tnode)
{
    node_ptr result = new_node0(E_CONST, IOP_NONE);
    result->dtype = DATA_UNSIGNED;
    result->wsize = LSIZE;
    result->val = int_const(tnode->wsize / CSIZE);
    return result;
}
/**
 * Create an expression for byte offset of a group field.
 */
Expression* NodeBuilder::get_field_offset_exp(FieldSymbol* fsym) {
    Expression *boffset = fsym->get_bit_offset();
    if (!is_kind_of<IntConstant>(boffset)) {
        trash(fsym);
        SUIF_THROW(SuifException(String("Field offset not in IntConstant ")
                                 + to_id_string(fsym)));
    }
    IInteger v = to<IntConstant>(boffset)->get_value();
    return int_const(v.div(IInteger(BITSPERBYTE)));
}
node_ptr new_node(node_t ntype, iop_t op, int degree)
{
    node_ptr result = malloc(sizeof(node_ele));
    result->ntype = ntype;
    result->op = op;
    result->dtype = DATA_SIGNED;
    result->wsize = WSIZE;
    result->degree = degree;
    result->val = int_const(0L);
    result->name = NULL;
    result->isdefined = one();
    result->children[0] = result->children[1] = result->children[2] = NULL;
    return result;
}
static Node val_nodea1(int init_val)                        /*;val_nodea1*/
{
    /* Called from init_sem to initialize the bounds of predefined types. */
    /* Like val_node1, but does not save the generated node. */
    Node    node;

    node = node_new(as_ivalue);
    /* INTEGER case */
    N_TYPE(node) = symbol_integer;
    N_VAL(node) = (char *) int_const(init_val);
    return node;
}
static Const eval_lit_map(Symbol obj)                       /*;eval_lit_map*/
{
    Symbol  typ;
    Tuple   tup;
    int     i;

    typ = TYPE_OF(obj);
    tup = (Tuple) literal_map(typ);
    for (i = 1; i <= tup_size(tup); i += 2) {
        if (ORIG_NAME(obj) == (char *)0) continue;
        if (streq(tup[i], ORIG_NAME(obj)))
            return int_const((int) tup[i+1]);
    }
    return const_new(CONST_OM);
    /*(return literal_map(TYPE_OF(obj))(original_name(obj));*/
}
                    if (old_dtype != dtype || old_wsize != wsize) {
                        printf("Changing '%s' to data type %s, word size %d (msb at %d)\n",
                            sval, dtype == DATA_UNSIGNED ? "unsigned" : "int",
                            wsize, msone);
                    }
#endif
                    ok = 1;
                }
            }
            if (!ok) {
                yyserror("Number too large for data type: '%s'", sval);
            }
        }
    }
    result->wsize = wsize;
    result->val = dtype == DATA_FLOAT
        ? float_const(fval)
        : mask_size(dtype == DATA_UNSIGNED ? uint_const(val) : int_const(val),
            wsize, dtype);
    result->dtype = dtype;
    return result;
}

/********************************************
                Output Display
 *****************************************/

/* 100 spaces */
static char *fill_buf =
    "          " "          " "          " "          " "          "
    "          " "          " "          " "          " "          ";
static int fill_pos = 0;
static int max_pos = 99;
static char *fill_string = "";

void reset_pos()
{
    fill_pos = max_pos;
IntConstant* NodeBuilder::bool_const(bool v) {
    return int_const(IInteger(v ? 1 : 0));
}
IntConstant* NodeBuilder::int_const(int i) {
    return int_const(IInteger(i));
}
/* Object evaluation */

void gen_address(Node node)                                 /*;gen_address*/
{
    /*
     * This procedure generates code for the o_expressions
     * or, in other words, the left-hand sides.
     */
    Node    pre_node, array_node, range_node, lbd_node, ubd_node,
            record_node, field_node, id_node;
    Symbol  node_name, type_name, record_name, record_type, field_name,
            comp_type, proc_name, return_type;
    int     f_off, bse, off, nk;
    Fortup  ft1;

#ifdef TRACE
    if (debug_flag)
        gen_trace_node("GEN_ADDRESS", node);
#endif

    while (N_KIND(node) == as_insert) {
        FORTUP(pre_node=(Node), N_LIST(node), ft1);
            compile(pre_node);
        ENDFORTUP(ft1);
        node = N_AST1(node);
    }

    node_name = N_UNQ(node);

    if (is_simple_name(node)) {
        type_name = get_type(node);
        if (is_renaming(node_name))
            gen_ks(I_PUSH, mu_addr, node_name);
        else
            gen_s(I_PUSH_EFFECTIVE_ADDRESS, node_name);

        /* Arrays are treated in a different manner, depending on their */
        /* nature: parameters, constants, variables... */
        if (is_array_type(type_name)) {
            if (is_formal_parameter(node_name)) {
                type_name = assoc_symbol_get(node_name, FORMAL_TEMPLATE);
            }
            gen_s(I_PUSH_EFFECTIVE_ADDRESS, type_name);
        }
    }
    else {
        switch (nk = N_KIND(node)) {
        case as_raise:
            compile(node);
            break;

        case as_index:
            gen_subscript(node);
            break;

        case as_slice:
            array_node = N_AST1(node);
            range_node = N_AST2(node);
            /*range_name = N_UNQ(range_node); -- never used ds 7-8-85 */
            /* Note: case of type simple name changed into range attribute */
            /* by expander */
            if (N_KIND(range_node) == as_attribute) {
                gen_attribute(range_node);
            }
            else { /* range */
                lbd_node = N_AST1(range_node);
                ubd_node = N_AST2(range_node);
                gen_value(lbd_node);
                gen_value(ubd_node);
            }
            if (N_KIND(array_node) == as_attribute) {
                gen_attribute(array_node);
            }
            else {
                gen_address(array_node);
            }
            gen(I_ARRAY_SLICE);
            break;

        case as_selector:
            record_node = N_AST1(node);
            field_node = N_AST2(node);
            record_name = N_UNQ(record_node);
            record_type = get_type(record_node);
            field_name = N_UNQ(field_node);
            f_off = FIELD_OFFSET(field_name);
            if (f_off >= 0 && ((! has_discriminant(record_type))
              || NATURE(field_name) == na_discriminant)) {
                if (is_simple_name(record_node)
                  && !(is_renaming(record_name)) && is_global(record_name)) {
                    reference_of(record_name);
                    bse = REFERENCE_SEGMENT;
                    off = REFERENCE_OFFSET;
                    /* The SETL version has generate(I_PUSH_IMMEDIATE, mu_addr,
                     *    ref, field_name);
                     * which we translate as (I_PUSH_EFFECTIVE_ADDRESS ...
                     *    ref = [bse, off+f_off];
                     * Replace use of explicit ref by PUSH_IMMEDIATE
                     */
                    /* gen_rc(I_PUSH_IMMEDIATE, explicit_ref_new(bse,
                     *    off+f_off), ""); */
                    gen_kv(I_PUSH_IMMEDIATE, mu_word, int_const(bse));
                    gen_kv(I_PUSH_IMMEDIATE, mu_word, int_const(off+f_off));
                }
                else {
                    gen_address(record_node);
                    if (f_off != 0) {
                        gen_ki(I_ADD_IMMEDIATE, mu_word, f_off);
                    }
                }
                if (is_array_type(comp_type=TYPE_OF(field_name))) {
                    gen_s(I_PUSH_EFFECTIVE_ADDRESS, comp_type);
                }
            }
            else {
                gen_address(record_node);
                gen_s(I_PUSH_EFFECTIVE_ADDRESS, record_type);
                /* translating following assuming field_name is comment part of
                 *-- instruction    ds 7-5-86
                 * gen_i(I_SELECT, FIELD_NUMBER(field_name), field_name);
                 */
                gen_i(I_SELECT, (int) FIELD_NUMBER(field_name));
            }
            break;

        case as_all:
            id_node = N_AST1(node);
            gen_value(id_node);
            if (is_array_type(N_TYPE(node)))
                gen_k(I_DEREF, mu_dble);
            break;

        case as_call:
            id_node = N_AST1(node);
            proc_name = N_UNQ(id_node);
            return_type = TYPE_OF(proc_name);
            gen_kc(I_DUPLICATE, kind_of(return_type), "place holder");
            compile(node);   /* processed from now as a procedure call */
            break;

        case as_un_op:
            gen_unary(node);
            break;

        case as_op:
            gen_binary(node);
            break;

        case as_string_ivalue:
            gen_value(node);
            break;

        default:
            compiler_error_k("GEN_ADDRESS called with kind ", node);
        }
    }
}
Node build_proc_init_ara(Symbol type_name)           /*;build_proc_init_ara*/
{
    /*
     * This is the main procedure for building default initialization
     * procedures for array types. Those initialization procedures are
     * built if the type given contains some subcomponent for which a
     * default initialization exists (at any level of nesting), or if it
     * has discriminants.
     * Note that scalar objects are not initialized at all, which implies
     * that they get whatever initial value is in that location in memory.
     * This saves some time in object creation.
     *
     * All init. procedures have an 'out' parameter that designates the
     * object being initialized (the space has already been allocated).
     */
    int     side_effect;
    Tuple   tup, formals, subscripts;
    Symbol  c_type, ip, index_t, proc_name, index_sym;
    Node    one_component, init_stmt, out_param, i_nodes, d_node, iter_node;
    Fortup  ft1;
    Node    iterator, index_node;

#ifdef TRACE
    if (debug_flag) {
        gen_trace_symbol("BUILD_PROC_INIT_ARR", type_name);
    }
#endif

    side_effect = FALSE;    /* Let's hope... TBSL */
    tup = SIGNATURE(type_name);
    c_type = (Symbol) tup[2];
    one_component = new_node(as_index);
    ip = INIT_PROC(base_type(c_type));

    if (ip != (Symbol)0) {
        /* Use the initialization procedure for the component type */
        init_stmt = (Node) build_init_call(one_component, ip, c_type, OPT_NODE);
    }
    else if (is_task_type(c_type)) {
        /* initialization is task creation. */
        init_stmt = new_assign_node(one_component,
          new_create_task_node(c_type));
    }
    else if (is_access_type(c_type)) {
        /* default value is the null pointer. */
        init_stmt = new_assign_node(one_component, new_null_node(c_type));
    }
    else {
        init_stmt = (Node) 0;
    }

    if (init_stmt != (Node)0) {
        /* body of initialization procedure is a loop over the indices */
        /* allocating each component. Generate loop variables and code */
        /* for iteration, using the attributes of the type. */
        proc_name = new_unique_name("type_name+INIT");
        out_param = new_param_node("param_type_name", proc_name,
          type_name, na_out);
        generate_object(N_UNQ(out_param));
        formals = tup_new1((char *) out_param);
        subscripts = tup_new(0);
        FORTUP(index_t=(Symbol), index_types(type_name), ft1);
            /*index = index_t + 'INDEX';*/
            index_sym = new_unique_name("index_t+INDEX");
            NATURE(index_sym) = na_obj;
            TYPE_OF(index_sym) = index_t;
            subscripts = tup_with(subscripts, (char *) new_name_node(index_sym));
        ENDFORTUP(ft1);

        i_nodes = new_node(as_list);
        /* need tup_copy since subscripts used destructively below */
        N_LIST(i_nodes) = tup_copy(subscripts);

        /* Build the tree for the one_component of the array. */
        N_AST1(one_component) = out_param;
        N_AST2(one_component) = i_nodes;
        N_TYPE(one_component) = c_type;

        while (tup_size(subscripts)) {
            /* Build loop from innermost index outwards. The iterations */
            /* span the ranges of the array being initialized. */
            /* dimension spanned by this loop: */
            d_node = new_ivalue_node(int_const(tup_size(subscripts)),
              symbol_integer);
            iterator = new_attribute_node(ATTR_O_RANGE,
              new_name_node(N_UNQ(out_param)), d_node, type_name);
            index_node = (Node) tup_frome(subscripts);
            iter_node = new_node(as_for);
            N_AST1(iter_node) = index_node;
            N_AST2(iter_node) = iterator;
            init_stmt = new_loop_node(OPT_NODE, iter_node,
              tup_new1((char *)init_stmt));
        }
        INIT_PROC(type_name) = proc_name;
        return initialization_proc(proc_name, type_name, formals,
          tup_new1((char *) init_stmt));
    }
    else {
        return OPT_NODE;
    }
}
static Tuple proc_init_rec(Symbol type_name, Tuple field_names,
  Node variant_node, Node out_param)                        /*;proc_init_rec*/
{
    /*
     * This is a subsidiary procedure to BUILD_PROC_INIT, which performs
     * the recursive part of construction of an initialization procedure
     * for a record type.
     *
     * Input: field_names is a list of component unique names (excluding
     *        discriminants). variant_node is the AST for the variant part
     *        of a component list; it is the variant part of the record
     *        declaration and has the same structure as a case statement.
     *
     *        out_param designates the object being initialized.
     *
     * Output: the statement list required to initialize this fragment of
     *         the record, or [] if no default initialization is needed.
     */
    Tuple   init_stmt, stmts;
    Node    one_component, f_init, c_node, variant_list;
    Symbol  f_type, f_name, ip;
    Fortup  ft1;
    int     empty_case;
    Tuple   case_list, comp_case_list;
    Node    choice_list, comp_list, disc_node;
    Node    invariant_node, new_case, list_node, case_node;
    Tuple   tup, index_list;
    int     nb_dim, i;
    Node    d_node, node, node1, node2, node3, node4, node5;
    Symbol  one_index_type;

    /* process fixed part first. */
    init_stmt = tup_new(0);
    FORTUP(f_name=(Symbol), field_names, ft1);
        one_component = new_selector_node(out_param, f_name);
        f_type = TYPE_OF(f_name);
        CONTAINS_TASK(type_name) = (char *)
          ((int)CONTAINS_TASK(type_name) | (int) CONTAINS_TASK(f_type));
        f_init = (Node) default_expr(f_name);
        if (f_init != OPT_NODE) {
            init_stmt = tup_with(init_stmt, (char *)
              new_assign_node(one_component,
                remove_discr_ref(f_init, out_param)));
        }
        else if ((ip = INIT_PROC(base_type(f_type))) != (Symbol)0) {
            init_stmt = tup_with(init_stmt, (char *)
              build_init_call(one_component, ip, f_type, out_param));
        }
        else if (is_task_type(f_type)) {
            init_stmt = tup_with(init_stmt, (char *)
              new_assign_node(one_component, new_create_task_node(f_type)));
        }
        else if (is_access_type(f_type)) {
            init_stmt = tup_with(init_stmt, (char *)
              new_assign_node(one_component, new_null_node(f_type)));
        }

        /* If we have an array then we have to check if its bounds are
         * compatible with the index subtypes (of the unconstrained array).
         * (This code was generated beforehand in type.c ("need_qual_r") but
         * it was wrong: we have to test the bounds only if the field is
         * present (case of variant record).
         * The generation of the tests is easier here.
         */
        if (is_array_type(f_type)) {
            tup = (Tuple) SIGNATURE(TYPE_OF(f_type));
            index_list = tup_copy((Tuple) tup[1]);
            nb_dim = tup_size(index_list);
            for (i = 1; i <= nb_dim; i++) {
                one_index_type = (Symbol) (tup_fromb(index_list));
                d_node = new_ivalue_node(int_const(i), symbol_integer);
                node1 = new_attribute_node(ATTR_O_FIRST,
                  one_component, d_node, one_index_type);
                node2 = new_attribute_node(ATTR_O_LAST,
                  one_component, d_node, one_index_type);
                node3 = new_attribute_node(ATTR_T_FIRST,
                  new_name_node(one_index_type), OPT_NODE, one_index_type);
                node4 = new_attribute_node(ATTR_T_LAST,
                  new_name_node(one_index_type), OPT_NODE, one_index_type);
                node5 = new_binop_node(symbol_or,
                  new_binop_node(symbol_lt, node1, node3, symbol_boolean),
                  new_binop_node(symbol_gt, node2, node4, symbol_boolean),
                  symbol_boolean);
                node = node_new(as_list);
                make_if_node(node,
                  tup_new1((char *) new_cond_stmts_node(
                    new_binop_node(symbol_and,
                      new_binop_node(symbol_le, node1, node2, symbol_boolean),
                      node5, symbol_boolean),
                    new_raise_node(symbol_constraint_error))),
                  OPT_NODE);
                init_stmt = tup_with(init_stmt, (char *) (node));
            }
        }
    ENDFORTUP(ft1);

    /* then build case statement to parallel structure of variant part. */
    empty_case = TRUE;      /* assumption */
    if (variant_node != OPT_NODE) {
        disc_node = N_AST1(variant_node);
        variant_list = N_AST2(variant_node);
        case_list = tup_new(0);
        comp_case_list = N_LIST(variant_list);
        FORTUP(c_node=(Node), comp_case_list, ft1);
            choice_list = N_AST1(c_node);
            comp_list = N_AST2(c_node);
            invariant_node = N_AST1(comp_list);
            variant_node = N_AST2(comp_list);
            field_names = build_comp_names(invariant_node);
            stmts = proc_init_rec(type_name, field_names, variant_node,
              out_param);
            /*empty_case and= stmts = [];*/
            empty_case = empty_case ? (tup_size(stmts) == 0) : FALSE;
            new_case = (N_KIND(c_node) == as_others_choice)
              ? new_node(as_others_choice) : new_node(as_variant_choices);
            N_AST1(new_case) = copy_tree(choice_list);
            N_AST2(new_case) = new_statements_node(stmts);
            case_list = tup_with(case_list, (char *) new_case);
        ENDFORTUP(ft1);

        if (! empty_case) {
            /* Build a case statement ruled by the value of the discriminant */
            /* for this variant part. */
            list_node = new_node(as_list);
            N_LIST(list_node) = case_list;
            case_node = new_node(as_case);
            N_AST1(case_node) = new_selector_node(out_param, N_UNQ(disc_node));
            N_AST2(case_node) = list_node;
            init_stmt = tup_with(init_stmt, (char *) case_node);
        }
    }
    return init_stmt;
}
static Const fold_op(Node node)                             /*;fold_op*/
{
    Node    opn, arg1, arg2, oplist;
    Const   result, op1, op2, tryc;
    Symbol  sym, op_name;
    int     *uint;
    int     rm;
    Tuple   tup;
    int     res, overflow;

    opn = N_AST1(node);
    oplist = N_AST2(node);
    tup = N_LIST(oplist);
    arg1 = (Node) tup[1];
    arg2 = (Node) tup[2];
    op1 = const_fold(arg1);
    op2 = const_fold(arg2);
    op_name = N_UNQ(opn);

    /* If either operand raises an exception, so does the operation */
    if (N_KIND(arg1) == as_raise) {
        copy_attributes(arg1, node);
        return const_new(CONST_OM);
    }
    if (N_KIND(arg2) == as_raise
      && op_name != symbol_andthen && op_name != symbol_orelse) {
        copy_attributes(arg2, node);
        return const_new(CONST_OM);
    }
    if (is_const_om(op1) || (is_const_om(op2)
      && (op_name != symbol_in && op_name != symbol_notin))) {
        return const_new(CONST_OM);
    }

    sym = op_name;
    if (sym == symbol_addi || sym == symbol_addfl) {
        if (sym == symbol_addi) {
            res = word_add(INTV(op1), INTV(op2), &overflow);
            if (overflow) {
                create_raise(node, symbol_constraint_error);
                result = const_new(CONST_OM);
            }
            else result = int_const(res);
        }
        else result = real_const(REALV(op1) + REALV(op2));
    }
    else if (sym == symbol_addfx) {
        const_check(op1, CONST_RAT);
        const_check(op2, CONST_RAT);
        result = rat_const(rat_add(RATV(op1), RATV(op2)));
    }
    else if (sym == symbol_subi) {
        if (is_const_int(op1)) {
            if (is_const_int(op2)) {
                res = word_sub(INTV(op1), INTV(op2), &overflow);
                if (overflow) {
                    create_raise(node, symbol_constraint_error);
                    result = const_new(CONST_OM);
                }
                else result = int_const(res);
            }
            else {
                chaos("fold_op: subi operand types");
            }
        }
    }
    else if (sym == symbol_subfl) {
        result = real_const(REALV(op1) - REALV(op2));
    }
    else if (sym == symbol_subfx) {
        const_check(op1, CONST_RAT);
        const_check(op2, CONST_RAT);
        result = rat_const(rat_sub(RATV(op1), RATV(op2)));
    }
    else if (sym == symbol_muli) {
#ifdef TBSL
        -- need to check for overflow and convert result back to int if not
        -- note that low-level setl is missing calls to check_overflow that
        -- are present in high-level and should be in low-level as well
        result = int_mul(int_fri(op1), int_fri(op2));
#endif
        /* until overflow check in */
        const_check(op1, CONST_INT);
        const_check(op2, CONST_INT);
        res = word_mul(INTV(op1), INTV(op2), &overflow);
        if (overflow) {
            create_raise(node, symbol_constraint_error);
            result = const_new(CONST_OM);
        }
        else result = int_const(res);
    }
static Const fold_unop(Node node)                           /*;fold_unop*/
{
    Node    opn, oplist;
    Const   result, op1;
    int     op1_kind;
    Symbol  sym;

    opn = N_AST1(node);
    oplist = N_AST2(node);
    op1 = const_fold((Node) (N_LIST(oplist))[1]);
    if (is_const_om(op1))
        return op1;
    op1_kind = op1->const_kind;
    sym = N_UNQ(opn);

    if (sym == symbol_addui) {
        /* the "+" can be ignored if it is used as a unary op */
        result = op1;
    }
    else if (sym == symbol_addufl) {
        result = op1;
    }
    else if (sym == symbol_addufx) {
        result = op1;
    }
    else if (sym == symbol_subui || sym == symbol_subufl
      || sym == symbol_subufx) {
        if (is_simple_value(op1)) {
            if (sym == symbol_subui) {
                if (is_const_int(op1)) {
                    if (INTV(op1) == ADA_MIN_INTEGER) {
                        create_raise(node, symbol_constraint_error);
                        result = const_new(CONST_OM);
                    }
                    else {
                        result = int_const(-INTV(op1));
                    }
                }
                else if (is_const_uint(op1))
                    result = uint_const(int_umin(UINTV(op1)));
                else
                    chaos("eval:subui bad type");
            }
            else if (sym == symbol_subufl) {
                const_check(op1, CONST_REAL);
                result = real_const(-REALV(op1));
            }
        }
        else {
            const_check(op1, CONST_RAT);
            result = rat_const(rat_umin(RATV(op1)));
        }
    }
    else if (sym == symbol_not) {
        if (is_simple_value(op1)) {
            if (op1_kind == CONST_INT)
                result = int_const(1 - INTV(op1));      /* bnot in setl */
            else
                chaos("fold_unop: bad kind");
        }
        else {
            /*TBSL*/
            result = const_new(CONST_OM);
        }
    }
    else if (sym == symbol_absi || sym == symbol_absfl || sym == symbol_absfx) {
        if (is_simple_value(op1)) {
            if (sym == symbol_absi) {
                if (op1_kind == CONST_INT)
                    result = int_const(abs(INTV(op1)));
                else if (op1_kind == CONST_UINT)
                    chaos("fold_unit absi in uint");
                else
                    chaos("fold_unop: bad kind");
            }
            else if (sym == symbol_absfl) {
                result = real_const(fabs(REALV(op1)));
            }
        }
        else {
            result = rat_const(rat_abs(RATV(op1)));
        }
    }
    return result;
}