/*
 * Parse the parenthesised controlling part of an if/while/switch:
 * either "(expr)" or, in C99 mode, "(decl [, expr])".
 * Fills in t->expr, and t->flow plus a fresh scope when a decl is seen.
 */
static void parse_test_init_expr(stmt *t, struct stmt_ctx *ctx)
{
	where here;

	where_cc1_current(&here);

	EAT(token_open_paren);

	/* if C99, we create a new scope here, for e.g.
	 * if(5 > (enum { a, b })a){ return a; } return b;
	 * "return b" can't see 'b' since its scope is only the if
	 *
	 * C90 drags the scope of the enum up to the enclosing block
	 */
	if(cc1_std >= STD_C99){
		ctx->scope = t->symtab = symtab_new(t->symtab, &here);
	}

	if(parse_at_decl(ctx->scope, 1)){
		decl *d;

		/* if we are at a type, push a scope for it, for
		 * for(int i ...), if(int i = ...) etc
		 */
		symtable *init_scope = symtab_new(t->symtab, &here);

		t->flow = stmt_flow_new(init_scope);

		d = parse_decl(
				DECL_SPEL_NEED, 0,
				init_scope, init_scope);

		UCC_ASSERT(d, "at decl, but no decl?");

		UCC_ASSERT(
				t->flow->for_init_symtab == init_scope,
				"wrong scope for stmt-init");

		flow_fold(t->flow, &t->symtab);
		ctx->scope = t->symtab;

		/* `d' is added to the scope implicitly */

		if(accept(token_comma)){
			/* if(int i = 5, i > f()){ ... } */
			t->expr = parse_expr_exp(ctx->scope, 0);
		}else{
			/* if(int i = 5) -> if(i) */
			t->expr = expr_new_identifier(d->spel);
		}
	}else{
		t->expr = parse_expr_exp(t->symtab, 0);
	}

	FOLD_EXPR(t->expr, t->symtab);

	EAT(token_close_paren);
}
/*
 * Push the current token back into the one-slot "regurgitate" buffer
 * and make `t' the current token.
 *
 * Fix: the second assertion previously tested `curtok_save', which the
 * first assertion has just proven to be token_unknown, so the check was
 * vacuous.  Per its own comment ("if current is an identifier...") it
 * must test `curtok': we cannot hold two identifiers because there is
 * only one `currentspelling' buffer.
 */
void uneat(enum token t)
{
	UCC_ASSERT(curtok_save == token_unknown,
			"curtok regurgitate buffer full");

	/* if current is an identifier, abort,
	 * since we can't hold two in currentspelling */
	UCC_ASSERT(curtok == token_identifier ? t != token_identifier : 1,
			"can't save another identifier");

	curtok_save = curtok;
	curtok = t;
}
/*
 * Print the constant value of `e' to cc1_out.
 * `e' must fold to a numeric constant; unsigned values are not yet
 * supported (asserted).
 */
static void print_expr_val(expr *e)
{
	consty k;

	const_fold(e, &k);

	UCC_ASSERT(k.type == CONST_NUM, "val expected");
	UCC_ASSERT((k.bits.num.suffix & VAL_UNSIGNED) == 0, "TODO: unsigned");

	if(K_INTEGRAL(k.bits.num))
		fprintf(cc1_out, NUMERIC_FMT_D, k.bits.num.val.i);
	else
		/* NOTE(review): floating value printed with NUMERIC_FMT_LD -
		 * confirm this macro matches the type of val.f */
		fprintf(cc1_out, NUMERIC_FMT_LD, k.bits.num.val.f);
}
type *type_nav_MAX_FOR(struct type_nav *root, unsigned sz, int is_signed) { enum type_primitive p = type_primitive_not_less_than_size(sz, is_signed); UCC_ASSERT(p != type_unknown, "no type max for %u", sz); return type_nav_btype(root, p); }
/*
 * Fold a compound literal: register its backing decl in the right
 * scope (static storage when at file scope / static context), fold the
 * initialiser and set the expression's type, then emit either runtime
 * assignments or a global static initialiser.
 */
void fold_expr_compound_lit(expr *e, symtable *stab)
{
	decl *d = e->bits.complit.decl;
	int static_ctx = e->bits.complit.static_ctx; /* global or static */

	if(cc1_std < STD_C99)
		cc1_warn_at(&e->where, c89_compound_literal,
				"compound literals are a C99 feature");

	/* if(!stab->parent) assert(static_ctx);
	 *
	 * except things like sizeof() just pass 0 for static_ctx,
	 * as it doesn't matter, we're not code-gen'd
	 */
	if(!stab->parent)
		static_ctx = 1;

	if(COMP_LIT_INITIALISED(e))
		return; /* being called from fold_gen_init_assignment_base */

	/* must be set before the recursive fold_gen_init_assignment_base */
	e->tree_type = d->ref;

	if(static_ctx){
		assert(!d->spel_asm);
		d->spel_asm = out_label_data_store(STORE_COMP_LIT);
		d->store = store_static;
	}

	e->bits.complit.sym = sym_new_and_prepend_decl(
			stab, d, static_ctx ? sym_global : sym_local);

	/* fold the initialiser */
	UCC_ASSERT(d->bits.var.init.dinit, "no init for comp.literal");

	decl_init_brace_up_fold(d, stab);

	/*
	 * update the type, for example if an array type has been completed
	 * this is done before folds, for array bounds checks
	 */
	e->tree_type = d->ref;

	if(!static_ctx){
		/* create the code for assignments
		 *
		 * - we must create a nested scope,
		 *   otherwise any other decls in stab's scope will
		 *   be generated twice - once for the scope we're nested in (stab),
		 *   and again on our call to gen_stmt() in our gen function
		 */
		decl_init_create_assignments_base_and_fold(d, e, stab);
	}else{
		fold_decl_global_init(d, stab);
	}
}
/*
 * Fold a compound assignment (+=, -=, ...): fold both sides, require
 * the lhs to be a modifiable lvalue and record the up-cast type needed
 * by the code generator.
 *
 * Fix: replaced the `#define lvalue e->lhs' macro (which masqueraded
 * as a variable and needed a matching #undef) with a const local -
 * e->lhs is never reassigned in this function, so the alias is stable.
 * This also matches the sibling revision of this function which uses
 * the same local-variable pattern.
 */
void fold_expr_assign_compound(expr *e, symtable *stab)
{
	const char *const desc = "compound assignment";
	expr *const lvalue = e->lhs;

	fold_inc_writes_if_sym(lvalue, stab);

	fold_expr_nodecay(e->lhs, stab);
	FOLD_EXPR(e->rhs, stab);

	fold_check_expr(e->lhs, FOLD_CHK_NO_ST_UN, desc);
	fold_check_expr(e->rhs, FOLD_CHK_NO_ST_UN, desc);

	/* skip the addr we inserted */
	if(!expr_must_lvalue(lvalue, desc)){
		/* prevent ICE from type_size(vla), etc */
		e->tree_type = lvalue->tree_type;
		return;
	}

	expr_assign_const_check(lvalue, &e->where);

	fold_check_restrict(lvalue, e->rhs, desc, &e->where);

	UCC_ASSERT(op_can_compound(e->bits.compoundop.op),
			"non-compound op in compound expr");

	/*expr_promote_int_if_smaller(&e->lhs, stab);
	 * lhs int promotion is handled in code-gen */
	expr_promote_int_if_smaller(&e->rhs, stab);

	{
		type *tlhs, *trhs;
		type *resolved = op_required_promotion(
				e->bits.compoundop.op, lvalue, e->rhs,
				&e->where, desc,
				&tlhs, &trhs);

		if(tlhs){
			/* must cast the lvalue, then down cast once the operation is done
			 * special handling for expr_kind(e->lhs, cast) is done in the gen-code
			 */
			e->bits.compoundop.upcast_ty = tlhs;

		}else if(trhs){
			fold_insert_casts(trhs, &e->rhs, stab);
		}

		e->tree_type = lvalue->tree_type;

		(void)resolved;
		/*type_free_1(resolved); XXX: memleak */
	}

	/* type check is done in op_required_promotion() */
}
/*
 * Build the asm label for a function's static local variable:
 * "<function>.static_<name>".  The caller owns the returned string.
 */
char *asm_label_static_local(const char *funcsp, const char *spel)
{
	char *label;

	UCC_ASSERT(funcsp, "no spel for %s", __func__);

	/* strlen(".static_") == 8, plus one byte for the nul terminator */
	label = umalloc(strlen(funcsp) + strlen(spel) + 9);
	sprintf(label, "%s.static_%s", funcsp, spel);

	return label;
}
/*
 * Strip `func' down to an empty (unspecified) argument list.
 * Only valid when at most one argument is present.
 */
void funcargs_empty(funcargs *func)
{
	if(!func->arglist){
		func->args_void = 0;
		return;
	}

	UCC_ASSERT(!func->arglist[1], "empty_args called when it shouldn't be");

	decl_free(func->arglist[0]);
	free(func->arglist);
	func->arglist = NULL;

	func->args_void = 0;
}
/*
 * Return the size in bytes of type `r'.
 * `from' locates diagnostics for incomplete or invalid types.
 */
unsigned type_size(type *r, const where *from)
{
	switch(r->type){
		case type_auto:
			ICE("__auto_type");

		case type_btype:
			return btype_size(r->bits.type, from);

		case type_tdef:
		{
			decl *d = r->bits.tdef.decl;
			type *sub;

			/* a typedef: size of the aliased type */
			if(d)
				return type_size(d->ref, from);

			/* typeof(expr): size of the folded expression's type */
			sub = r->bits.tdef.type_of->tree_type;
			UCC_ASSERT(sub, "type_size for unfolded typedef");
			return type_size(sub, from);
		}

		case type_attr:
		case type_cast:
		case type_where:
			/* transparent wrappers - defer to the wrapped type */
			return type_size(r->ref, from);

		case type_ptr:
		case type_block:
			return platform_word_size();

		case type_func:
			/* function size is one, sizeof(main) is valid */
			return 1;

		case type_array:
		{
			integral_t sz;

			if(type_is_void(r->ref))
				die_at(from, "array of void");

			if(!r->bits.array.size)
				die_at(from, "array has an incomplete size");

			sz = const_fold_val_i(r->bits.array.size);

			return sz * type_size(r->ref, from);
		}
	}

	ucc_unreach(0);
}
/*
 * Build the label for a function's static local variable:
 * "<function>.static<N>_<name>", where N is a monotonically increasing
 * counter keeping labels unique across shadowed/redeclared statics.
 * The caller owns the returned string.
 */
char *out_label_static_local(const char *funcsp, const char *spel)
{
	char *ret;
	int len;

	UCC_ASSERT(funcsp, "no spel for %s", __func__);

	/* 16 covers ".static", '_', the nul byte and the counter digits;
	 * SNPRINTF bounds the write regardless */
	len = strlen(funcsp) + strlen(spel) + 16;
	ret = umalloc(len);
	SNPRINTF(ret, len, "%s.static%d_%s", funcsp, static_last++, spel);

	return ret;
}
/*
 * Warn when storing a constant into a bitfield would truncate it,
 * e.g. struct { int b : 2; } x = { 5 };
 */
void bitfield_trunc_check(decl *mem, expr *from)
{
	consty k;

	if(expr_kind(from, cast)){
		/* we'll warn about bitfield truncation, prevent warnings
		 * about cast truncation */
		from->expr_cast_implicit = 0;
	}

	const_fold(from, &k);

	if(k.type == CONST_NUM){
		const sintegral_t kexp = k.bits.num.val.i;
		/* highest may be -1 - kexp is zero */
		const int highest = integral_high_bit(k.bits.num.val.i, from->tree_type);
		const int is_signed = type_is_signed(mem->bits.var.field_width->tree_type);

		/* re-use `k' for the field width - itself an integral constant */
		const_fold(mem->bits.var.field_width, &k);

		UCC_ASSERT(k.type == CONST_NUM, "bitfield size not val?");
		UCC_ASSERT(K_INTEGRAL(k.bits.num), "fp bitfield size?");

		if(highest > (sintegral_t)k.bits.num.val.i
		|| (is_signed && highest == (sintegral_t)k.bits.num.val.i))
		{
			/* mask the constant down to the bits that fit in the field.
			 * NOTE(review): `-1UL << width' is undefined if width can
			 * reach the bit-width of unsigned long - confirm upstream
			 * bounds on bitfield widths */
			sintegral_t kexp_to = kexp & ~(-1UL << k.bits.num.val.i);

			cc1_warn_at(&from->where,
					bitfield_trunc,
					"truncation in store to bitfield alters value: "
					"%" NUMERIC_FMT_D " -> %" NUMERIC_FMT_D,
					kexp, kexp_to);
		}
	}
}
/*
 * Check a call against __attribute__((sentinel[(i)])):
 * the i-th variadic argument from the end must be a null pointer
 * (i defaults to 0, i.e. the last argument).
 */
static void sentinel_check(where *w, expr *e, expr **args,
		const int variadic, const int nstdargs, symtable *stab)
{
#define ATTR_WARN_RET(w, ...) \
	do{ cc1_warn_at(w, attr_sentinel, __VA_ARGS__); return; }while(0)

	attribute *attr = func_or_builtin_attr_present(e, attr_sentinel);
	int i, nvs;
	expr *sentinel;

	if(!attr)
		return;

	if(!variadic)
		return; /* warning emitted elsewhere, on the decl */

	if(attr->bits.sentinel){
		consty k;

		FOLD_EXPR(attr->bits.sentinel, stab);
		const_fold(attr->bits.sentinel, &k);

		if(k.type != CONST_NUM || !K_INTEGRAL(k.bits.num))
			die_at(&attr->where, "sentinel attribute not reducible to integer constant");

		i = k.bits.num.val.i;
	}else{
		i = 0; /* default: the last argument is the sentinel */
	}

	/* number of variadic arguments actually passed */
	nvs = dynarray_count(args) - nstdargs;

	if(nvs == 0)
		ATTR_WARN_RET(w, "not enough variadic arguments for a sentinel");

	UCC_ASSERT(nvs >= 0, "too few args");

	if(i >= nvs)
		ATTR_WARN_RET(w, "sentinel index is not a variadic argument");

	/* index back from the final argument */
	sentinel = args[(nstdargs + nvs - 1) - i];

	/* must be of a pointer type, printf("%p\n", 0) is undefined */
	if(!expr_is_null_ptr(sentinel, NULL_STRICT_ANY_PTR))
		ATTR_WARN_RET(&sentinel->where,
				"sentinel argument expected (got %s)",
				type_to_str(sentinel->tree_type));

#undef ATTR_WARN_RET
}
/*
 * Fold a compound assignment (+=, -=, ...): fold both sides, require
 * the lhs to be a modifiable lvalue and insert the promotion casts
 * required by the operator.
 */
void fold_expr_assign_compound(expr *e, symtable *stab)
{
	expr *const lvalue = e->lhs;

	fold_inc_writes_if_sym(lvalue, stab);

	fold_expr_no_decay(e->lhs, stab);
	FOLD_EXPR(e->rhs, stab);

	fold_check_expr(e->lhs, FOLD_CHK_NO_ST_UN, "compound assignment");
	fold_check_expr(e->rhs, FOLD_CHK_NO_ST_UN, "compound assignment");

	/* skip the addr we inserted */
	expr_must_lvalue(lvalue);

	expr_assign_const_check(lvalue, &e->where);

	fold_check_restrict(lvalue, e->rhs, "compound assignment", &e->where);

	UCC_ASSERT(op_can_compound(e->op), "non-compound op in compound expr");

	{
		type *tlhs, *trhs;
		type *resolved = op_required_promotion(e->op, lvalue, e->rhs,
				&e->where, &tlhs, &trhs);

		if(tlhs){
			/* must cast the lvalue, then down cast once the operation is done
			 * special handling for expr_kind(e->lhs, cast) is done in the gen-code
			 */
			fold_insert_casts(tlhs, &e->lhs, stab);

			/* casts may be inserted anyway, and don't want to rely on
			 * .implicit_cast stuff */
			e->bits.compound_upcast = 1;

		}else if(trhs){
			fold_insert_casts(trhs, &e->rhs, stab);
		}

		e->tree_type = lvalue->tree_type;

		(void)resolved;
		/*type_free_1(resolved); XXX: memleak */
	}

	/* type check is done in op_required_promotion() */
}
/*
 * Generate code for a plain (non-compound) assignment.
 * Evaluates rhs then lhs, stores, and returns the value re-read from
 * the store so e.g. bitfield truncation is reflected in the result.
 */
const out_val *gen_expr_assign(const expr *e, out_ctx *octx)
{
	const out_val *rhs_val;
	const out_val *lhs_store;

	UCC_ASSERT(!e->assign_is_post, "assign_is_post set for non-compound assign");

	assert(!type_is_s_or_u(e->tree_type));

	rhs_val = gen_expr(e->rhs, octx);
	lhs_store = gen_expr(e->lhs, octx);

	out_val_retain(octx, lhs_store);
	out_store(octx, lhs_store, rhs_val);

	/* re-read from the store,
	 * e.g. if the value has undergone bitfield truncation */
	return out_deref(octx, lhs_store);
}
/*
 * Fold a for-loop: create the break/continue labels, fold the C99
 * init-decls and the three controlling expressions, then the body.
 *
 * Fix: the continue label was misspelt "for_contiune".
 */
void fold_stmt_for(stmt *s)
{
	s->lbl_break    = asm_label_flow("for_start");
	s->lbl_continue = asm_label_flow("for_continue");

	if(s->flow->for_init_decls){
		expr *init_exp = fold_for_if_init_decls(s);

		UCC_ASSERT(!s->flow->for_init, "for init in c99 for-decl mode");
		s->flow->for_init = init_exp;
	}

#define FOLD_IF(x) if(x) fold_expr(x, s->flow->for_init_symtab)
	FOLD_IF(s->flow->for_init);
	FOLD_IF(s->flow->for_while);
	FOLD_IF(s->flow->for_inc);
#undef FOLD_IF

	if(s->flow->for_while){
		fold_need_expr(s->flow->for_while, "for-while", 1);

		OPT_CHECK(s->flow->for_while, "constant expression in for");
	}

	fold_stmt(s->lhs);

	/*
	 * need an extra generation for for_init,
	 * since it's generated unlike other loops (symtab_new() in parse.c)
	 */
	gen_code_decls(s->flow->for_init_symtab);

#ifdef SYMTAB_DEBUG
	fprintf(stderr, "for-code st:\n");
	PRINT_STAB(s->lhs, 1);

	fprintf(stderr, "for-init st:\n");
	print_stab(s->flow->for_init_symtab, 0, NULL);

	fprintf(stderr, "for enclosing scope st:\n");
	PRINT_STAB(s, 0);
#endif
}
/*
 * Return the symtable the test expression of an if/while/etc should be
 * folded in.  For C99-style "if(decl)" statements the decl is turned
 * into the test expression and the init scope is returned.
 */
symtable *fold_stmt_test_init_expr(stmt *s, const char *which)
{
	expr *dinit;

	if(!s->flow)
		return s->symtab;

	/* if(char *x = ...) */
	dinit = fold_for_if_init_decls(s);

	if(!dinit)
		DIE_AT(&s->where, "no initialiser to test in %s", which);

	UCC_ASSERT(!s->expr, "%s-expr in c99_ucc %s-init mode", which, which);
	s->expr = dinit;

	return s->flow->for_init_symtab;
}
/*
 * Allocate and initialise a statement node, selecting the gen function
 * matching the current backend, then run the kind-specific `init'.
 */
stmt *stmt_new(
		func_fold_stmt *f_fold,
		func_gen_stmt *f_gen,
		func_gen_stmt *f_gen_style,
		func_str_stmt *f_str,
		void (*init)(stmt *),
		symtable *stab)
{
	stmt *new_stmt = umalloc(sizeof *new_stmt);

	where_cc1_current(&new_stmt->where);

	UCC_ASSERT(stab, "no symtable for statement");
	new_stmt->symtab = stab;

	new_stmt->f_fold = f_fold;

	switch(cc1_backend){
		case BACKEND_ASM:
			new_stmt->f_gen = f_gen;
			break;

		case BACKEND_PRINT:
		case BACKEND_STYLE:
			new_stmt->f_gen = f_gen_style;
			break;

		default:
			ICE("bad backend");
	}

	new_stmt->f_str = f_str;

	init(new_stmt);

	/* these statements unconditionally transfer control away, so any
	 * code following them in the same block is unreachable */
	new_stmt->kills_below_code =
		   stmt_kind(new_stmt, break)
		|| stmt_kind(new_stmt, return)
		|| stmt_kind(new_stmt, goto)
		|| stmt_kind(new_stmt, continue);

	return new_stmt;
}
/*
 * Fold a function call (legacy path): resolve the callee, implicitly
 * declaring it as "extern int f()" if unknown, derive the call's
 * result type and check argument counts/types against the prototype.
 */
void fold_expr_funcall(expr *e, symtable *stab)
{
	decl *df;
	funcargs *args_exp;

	if(expr_kind(e->expr, identifier) && e->expr->spel){
		char *const sp = e->expr->spel;

		e->sym = symtab_search(stab, sp);

		if(!e->sym){
			/* pre-C99 implicit declaration: assume "extern int sp()" */
			df = decl_new_where(&e->where);

			df->type->primitive = type_int;
			df->type->spec |= spec_extern;

			cc1_warn_at(&e->where, 0, WARN_IMPLICIT_FUNC,
					"implicit declaration of function \"%s\"", sp);

			df->spel = sp;

			df->funcargs = funcargs_new();

			if(e->funcargs)
				/* set up the funcargs as if it's "x()" - i.e. any args */
				function_empty_args(df->funcargs);

			/* record the implicit decl at file scope */
			e->sym = symtab_add(symtab_root(stab), df, sym_global,
					SYMTAB_WITH_SYM, SYMTAB_PREPEND);
		}else{
			df = e->sym->decl;
		}

		fold_expr(e->expr, stab);
	}else{
		fold_expr(e->expr, stab);

		/*
		 * convert int (*)() to remove the deref
		 */
		if(decl_is_func_ptr(e->expr->tree_type)){
			/* XXX: memleak */
			e->expr = e->expr->lhs;
			fprintf(stderr, "FUNCPTR\n");
		}else{
			fprintf(stderr, "decl %s\n", decl_to_str(e->expr->tree_type));
		}

		df = e->expr->tree_type;

		if(!decl_is_callable(df)){
			die_at(&e->expr->where, "expression %s (%s) not callable",
					e->expr->f_str(), decl_to_str(df));
		}
	}

	e->tree_type = decl_copy(df);
	/*
	 * int (*x)();
	 * (*x)();
	 * evaluates to tree_type = int;
	 */
	decl_func_deref(e->tree_type);

	if(e->funcargs){
		expr **iter;
		for(iter = e->funcargs; *iter; iter++)
			fold_expr(*iter, stab);
	}

	/* func count comparison, only if the func has arg-decls, or the func is f(void) */
	args_exp = decl_funcargs(e->tree_type);

	UCC_ASSERT(args_exp, "no funcargs for decl %s", df->spel);

	if(args_exp->arglist || args_exp->args_void){
		expr **iter_arg;
		decl **iter_decl;
		int count_decl, count_arg;

		count_decl = count_arg = 0;

		for(iter_arg = e->funcargs; iter_arg && *iter_arg; iter_arg++, count_arg++);
		for(iter_decl = args_exp->arglist; iter_decl && *iter_decl; iter_decl++, count_decl++);

		/* variadic functions only need the fixed arguments supplied */
		if(count_decl != count_arg
		&& (args_exp->variadic ? count_arg < count_decl : 1)){
			die_at(&e->where, "too %s arguments to function %s (got %d, need %d)",
					count_arg > count_decl ? "many" : "few",
					df->spel, count_arg, count_decl);
		}

		if(e->funcargs){
			/* build a temporary funcargs from the argument types so the
			 * arg-by-arg type comparison can run */
			funcargs *argument_decls = funcargs_new();

			for(iter_arg = e->funcargs; *iter_arg; iter_arg++)
				dynarray_add((void ***)&argument_decls->arglist, (*iter_arg)->tree_type);

			fold_funcargs_equal(args_exp, argument_decls, 1, &e->where, "argument", df->spel);
			funcargs_free(argument_decls, 0);
		}
	}
}
/*
 * Copy `memsz' bytes between memory at `ptr' and the register list
 * `regs' (used when aggregates are passed/returned in registers).
 * mem2reg != 0 loads memory into registers; otherwise the registers
 * are spilled out to memory.  Consumes `ptr'.
 */
static void impl_overlay_mem_reg(
		out_ctx *octx,
		unsigned memsz, unsigned nregs,
		struct vreg regs[], int mem2reg,
		const out_val *ptr)
{
	const unsigned pws = platform_word_size();
	struct vreg *cur_reg = regs;
	unsigned reg_i = 0;

	if(memsz == 0){
		out_val_release(octx, ptr);
		return;
	}

	UCC_ASSERT(
			nregs * pws >= memsz,
			"not enough registers for memory overlay");

	out_comment(octx,
			"overlay, %s2%s(%u)",
			mem2reg ? "mem" : "reg",
			mem2reg ? "reg" : "mem",
			memsz);

	if(!mem2reg){
		/* reserve all registers so we don't accidentally wipe before the spill */
		for(reg_i = 0; reg_i < nregs; reg_i++)
			v_reserve_reg(octx, &regs[reg_i]);
	}

	for(;; cur_reg++, reg_i++){
		/* read/write whatever size is required */
		type *this_ty;
		unsigned this_sz;

		if(cur_reg->is_float){
			UCC_ASSERT(memsz >= 4, "float for memsz %u?", memsz);

			this_ty = type_nav_btype(
					cc1_type_nav,
					memsz > 4 ? type_double : type_float);

		}else{
			/* largest integer type covering the remaining bytes */
			this_ty = type_nav_MAX_FOR(cc1_type_nav, memsz);
		}

		this_sz = type_size(this_ty, NULL);

		UCC_ASSERT(this_sz <= memsz, "reading/writing too much memory");

		ptr = out_change_type(octx, ptr, type_ptr_to(this_ty));

		out_val_retain(octx, ptr);

		if(mem2reg){
			const out_val *fetched;

			/* can use impl_deref, as we have a register already,
			 * and know that the memory is an lvalue and not a bitfield
			 *
			 * this means we can load straight into the desired register */
			fetched = impl_deref(octx, ptr, cur_reg);

			UCC_ASSERT(reg_i < nregs, "reg oob");

			if(fetched->type != V_REG || !vreg_eq(&fetched->bits.regoff.reg, cur_reg)){
				/* move to register */
				v_freeup_reg(octx, cur_reg);
				fetched = v_to_reg_given(octx, fetched, cur_reg);
			}
			out_flush_volatile(octx, fetched);
			v_reserve_reg(octx, cur_reg); /* prevent changes */

		}else{
			const out_val *vreg = v_new_reg(octx, NULL, this_ty, cur_reg);

			out_store(octx, ptr, vreg);
		}

		memsz -= this_sz;

		/* early termination */
		if(memsz == 0)
			break;

		/* increment our memory pointer */
		ptr = out_change_type(
				octx, ptr,
				type_ptr_to(type_nav_btype(cc1_type_nav, type_uchar)));

		ptr = out_op(octx, op_plus, ptr,
				out_new_l(
					octx,
					type_nav_btype(cc1_type_nav, type_intptr_t),
					pws));
	}

	out_val_release(octx, ptr);

	/* done, unreserve all registers */
	for(reg_i = 0; reg_i < nregs; reg_i++)
		v_unreserve_reg(octx, &regs[reg_i]);
}
/*
 * Fold a struct/union member access, a.b or a->b.
 * Resolves the member decl, rewrites a.b into (&a)->b for code-gen,
 * and propagates qualifiers from the aggregate to the member type.
 */
void fold_expr_struct(expr *e, symtable *stab)
{
	/*
	 * lhs = any ptr-to-struct expr
	 * rhs = struct member ident
	 */
	const int ptr_expect = !e->expr_is_st_dot;
	struct_union_enum_st *sue;
	char *spel;

	fold_expr_no_decay(e->lhs, stab);
	/* don't fold the rhs - just a member name */

	if(e->rhs){
		UCC_ASSERT(expr_kind(e->rhs, identifier),
				"struct/union member not identifier (%s)", e->rhs->f_str());

		UCC_ASSERT(!e->bits.struct_mem.d, "already have a struct-member");

		spel = e->rhs->bits.ident.spel;
	}else{
		UCC_ASSERT(e->bits.struct_mem.d, "no member specified already?");
		spel = NULL;
	}

	/* we access a struct, of the right ptr depth */
	{
		type *r = e->lhs->tree_type;

		if(ptr_expect){
			type *rtest = type_is(r, type_ptr);

			if(!rtest && !(rtest = type_is(r, type_array)))
				goto err;

			r = rtest->ref; /* safe - rtest is an array */
		}

		if(!(sue = type_is_s_or_u(r))){
err:
			die_at(&e->lhs->where, "'%s' (%s-expr) is not a %sstruct or union (member %s)",
					type_to_str(e->lhs->tree_type),
					e->lhs->f_str(),
					ptr_expect ? "pointer to " : "",
					spel);
		}
	}

	if(!sue_complete(sue)){
		char wbuf[WHERE_BUF_SIZ];

		die_at(&e->lhs->where, "%s incomplete type (%s)\n"
				"%s: note: forward declared here",
				ptr_expect
					? "dereferencing pointer to"
					: "accessing member of",
				type_to_str(e->lhs->tree_type),
				where_str_r(wbuf, &sue->where));
	}

	if(spel){
		/* found the struct, find the member */
		decl *d_mem = struct_union_member_find(sue, spel,
				&e->bits.struct_mem.extra_off, NULL);

		if(!d_mem)
			die_at(&e->where, "%s %s has no member named \"%s\"",
					sue_str(sue), sue->spel, spel);

		e->rhs->tree_type = (e->bits.struct_mem.d = d_mem)->ref;
	}/* else already have the member */

	/*
	 * if it's a.b, convert to (&a)->b for asm gen
	 * e = { lhs = "a", rhs = "b", type = dot }
	 * e = {
	 *   type = ptr,
	 *   lhs = { cast<void *>, expr = { expr = "a", type = addr } },
	 *   rhs = "b",
	 * }
	 */
	if(!ptr_expect){
		expr *cast, *addr;

		addr = expr_new_addr(e->lhs);
		cast = expr_new_cast(addr,
				type_ptr_to(type_nav_btype(cc1_type_nav, type_void)),
				1);

		e->lhs = cast;
		e->expr_is_st_dot = 0;

		FOLD_EXPR(e->lhs, stab);
	}

	/* pull qualifiers from the struct to the member */
	e->tree_type = type_qualify(
			e->bits.struct_mem.d->ref,
			type_qual(e->lhs->tree_type));
}
/*
 * Emit the static initialiser for `init' at type `tfor' into section
 * `sec'.  Handles null/padding init, structs (including bitfield
 * groups and compound-literal copy-init), arrays, unions and scalars.
 * Recurses for aggregate members/elements.
 */
static void asm_declare_init(enum section_type sec, decl_init *init, type *tfor)
{
	type *r;

	if(init == DYNARRAY_NULL)
		init = NULL;

	if(!init){
		/* don't initialise flex-arrays */
		if(!type_is_incomplete_array(tfor)){
			asm_declare_pad(sec, type_size(tfor, NULL),
					"null init"/*, type_to_str(tfor)*/);
		}else{
			asm_out_section(sec, ASM_COMMENT " flex array init skipped\n");
		}

	}else if((r = type_is_primitive(tfor, type_struct))){
		/* array of stmts for each member
		 * assumes the ->bits.inits order is member order
		 */
		struct_union_enum_st *const sue = r->bits.type->sue;
		sue_member **mem;
		decl_init **i;
		unsigned end_of_last = 0;
		struct bitfield_val *bitfields = NULL;
		unsigned nbitfields = 0;
		decl *first_bf = NULL;
		expr *copy_from_exp;

		UCC_ASSERT(init->type == decl_init_brace, "unbraced struct");

#define DEBUG(s, ...) /*fprintf(f, "\033[35m" s "\033[m\n", __VA_ARGS__)*/

		i = init->bits.ar.inits;

		/* check for compound-literal copy-init */
		if((copy_from_exp = decl_init_is_struct_copy(init, sue))){
			decl_init *copy_from_init;

			copy_from_exp = expr_skip_lval2rval(copy_from_exp);

			/* the only struct-expression that's possible
			 * in static context is a compound literal */
			assert(expr_kind(copy_from_exp, compound_lit)
					&& "unhandled expression init");

			copy_from_init = copy_from_exp->bits.complit.decl->bits.var.init.dinit;
			assert(copy_from_init->type == decl_init_brace);

			i = copy_from_init->bits.ar.inits;
		}

		/* iterate using members, not inits */
		for(mem = sue->members;
				mem && *mem;
				mem++)
		{
			decl *d_mem = (*mem)->struct_member;
			decl_init *di_to_use = NULL;

			/* advance the init cursor, skipping designated-init gaps */
			if(i){
				int inc = 1;

				if(*i == NULL)
					inc = 0;
				else if(*i != DYNARRAY_NULL)
					di_to_use = *i;

				if(inc){
					i++;
					if(!*i)
						i = NULL; /* reached end */
				}
			}

			DEBUG("init for %ld/%s, %s",
					mem - sue->members, d_mem->spel,
					di_to_use ? di_to_use->bits.expr->f_str() : NULL);

			/* only pad if we're not on a bitfield or we're on the first bitfield */
			if(!d_mem->bits.var.field_width || !first_bf){
				DEBUG("prev padding, offset=%d, end_of_last=%d",
						d_mem->struct_offset, end_of_last);

				UCC_ASSERT(
						d_mem->bits.var.struct_offset >= end_of_last,
						"negative struct pad, sue %s, member %s "
						"offset %u, end_of_last %u",
						sue->spel, decl_to_str(d_mem),
						d_mem->bits.var.struct_offset, end_of_last);

				asm_declare_pad(sec,
						d_mem->bits.var.struct_offset - end_of_last,
						"prev struct padding");
			}

			if(d_mem->bits.var.field_width){
				if(!first_bf || d_mem->bits.var.first_bitfield){
					if(first_bf){
						DEBUG("new bitfield group (%s is new boundary), old:",
								d_mem->spel);
						/* next bitfield group - store the current */
						bitfields_out(sec, bitfields, &nbitfields, first_bf->ref);
					}
					first_bf = d_mem;
				}

				bitfields = bitfields_add(
						bitfields, &nbitfields,
						d_mem, di_to_use);

			}else{
				if(nbitfields){
					DEBUG("at non-bitfield, prev-bitfield out:", 0);
					bitfields_out(sec, bitfields, &nbitfields, first_bf->ref);
					first_bf = NULL;
				}

				DEBUG("normal init for %s:", d_mem->spel);
				asm_declare_init(sec, di_to_use, d_mem->ref);
			}

			if(type_is_incomplete_array(d_mem->ref)){
				UCC_ASSERT(!mem[1], "flex-arr not at end");
			}else if(!d_mem->bits.var.field_width || d_mem->bits.var.first_bitfield){
				unsigned last_sz = type_size(d_mem->ref, NULL);

				end_of_last = d_mem->bits.var.struct_offset + last_sz;
				DEBUG("done with member \"%s\", end_of_last = %d",
						d_mem->spel, end_of_last);
			}
		}

		/* flush any trailing bitfield group */
		if(nbitfields)
			bitfields_out(sec, bitfields, &nbitfields, first_bf->ref);
		free(bitfields);

		/* need to pad to struct size */
		asm_declare_pad(sec,
				sue_size(sue, NULL) - end_of_last,
				"struct tail");

	}else if((r = type_is(tfor, type_array))){
		size_t i, len;
		decl_init **p;
		type *next = type_next(tfor);

		UCC_ASSERT(init->type == decl_init_brace, "unbraced struct");

		if(type_is_incomplete_array(tfor)){
			/* length comes from the initialiser itself */
			len = dynarray_count(init->bits.ar.inits);
		}else{
			UCC_ASSERT(type_is_complete(tfor), "incomplete array/type init");
			len = type_array_len(tfor);
		}

		for(i = len, p = init->bits.ar.inits;
				i > 0;
				i--)
		{
			decl_init *this = NULL;
			if(*p){
				this = *p++;

				if(this != DYNARRAY_NULL && this->type == decl_init_copy){
					/*fprintf(f, "# copy from %lu\n", DECL_INIT_COPY_IDX(this, init));*/
					struct init_cpy *icpy = *this->bits.range_copy;
					/* resolve the copy */
					this = icpy->range_init;
				}
			}

			asm_declare_init(sec, this, next);
		}

	}else if((r = type_is_primitive(tfor, type_union))){
		/* union inits are decl_init_brace with spaces up to the first union init,
		 * then NULL/end of the init-array */
		struct_union_enum_st *sue = type_is_s_or_u(r);
		unsigned i, sub = 0;
		decl_init *u_init;

		UCC_ASSERT(init->type == decl_init_brace, "brace init expected");

		/* skip the empties until we get to one */
		for(i = 0; init->bits.ar.inits[i] == DYNARRAY_NULL; i++);

		if((u_init = init->bits.ar.inits[i])){
			decl *mem = sue->members[i]->struct_member;
			type *mem_r = mem->ref;

			/* union init, member at index `i' */
			if(mem->bits.var.field_width){
				/* we know it's integral */
				struct bitfield_val bfv;

				ASSERT_SCALAR(u_init);

				bitfield_val_set(&bfv, u_init->bits.expr, mem->bits.var.field_width);

				asm_declare_init_bitfields(sec, &bfv, 1, mem_r);
			}else{
				asm_declare_init(sec, u_init, mem_r);
			}

			sub = type_size(mem_r, NULL);
		} /* else null union init */

		/* pad out to the full union size */
		asm_declare_pad(sec,
				type_size(r, NULL) - sub,
				"union extra");

	}else{
		/* scalar */
		expr *exp = init->bits.expr;

		UCC_ASSERT(init->type == decl_init_scalar, "scalar init expected");

		/* exp->tree_type should match tfor */
		{
			char buf[TYPE_STATIC_BUFSIZ];

			UCC_ASSERT(
					type_cmp(exp->tree_type, tfor, TYPE_CMP_ALLOW_TENATIVE_ARRAY)
						!= TYPE_NOT_EQUAL,
					"mismatching init types: %s and %s",
					type_to_str_r(buf, exp->tree_type),
					type_to_str(tfor));
		}

		/* use tfor, since "abc" has type (char[]){(int)'a', (int)'b', ...} */
		DEBUG(" scalar init for %s:", type_to_str(tfor));
		static_val(sec, tfor, exp);
	}
}
/*
 * Fold a function call (modern path): fold the callee, verify it is
 * callable, then check argument counts, voidness/nonnull, types and
 * default promotions, plus call attributes (format, sentinel,
 * warn_unused) and standard-function sanity checks.
 */
void fold_expr_funcall(expr *e, symtable *stab)
{
	type *func_ty;
	funcargs *args_from_decl;
	char *sp = NULL;
	unsigned count_decl;

	check_implicit_funcall(e, stab, &sp);

	FOLD_EXPR(e->expr, stab);
	func_ty = e->expr->tree_type;

	if(!type_is_callable(func_ty)){
		warn_at_print_error(&e->expr->where,
				"%s-expression (type '%s') not callable",
				expr_str_friendly(e->expr, 0),
				type_to_str(func_ty));

		fold_had_error = 1;

		/* error recovery: pretend the call returns int */
		e->tree_type = type_nav_btype(cc1_type_nav, type_int);
		return;
	}

	e->tree_type = type_func_call(func_ty, &args_from_decl);

	/* func count comparison, only if the func has arg-decls, or the func is f(void) */
	UCC_ASSERT(args_from_decl, "no funcargs for decl %s", sp);

	count_decl = dynarray_count(args_from_decl->arglist);

	if(check_arg_counts(args_from_decl, count_decl, e->funcargs, e, sp))
		return;

	if(e->funcargs){
		check_arg_voidness_and_nonnulls(
				e, stab,
				args_from_decl, count_decl,
				e->funcargs, sp);
	}

	if(!FUNCARGS_EMPTY_NOVOID(args_from_decl))
		check_arg_types(args_from_decl, e->funcargs, stab, sp, &e->where);

	if(e->funcargs)
		default_promote_args(e->funcargs, count_decl, stab);

	if(type_is_s_or_u(e->tree_type)){
		/* handled transparently by the backend */
		e->f_islval = expr_is_lval_struct;

		cc1_warn_at(&e->expr->where,
				aggregate_return,
				"called function returns aggregate (%s)",
				type_to_str(e->tree_type));
	}

	/* attr */
	{
		type *fnty = e->expr->tree_type;

		/* look through decays */
		if(expr_kind(e->expr, cast) && expr_cast_is_lval2rval(e->expr))
			fnty = expr_cast_child(e->expr)->tree_type;

		format_check_call(fnty, e->funcargs, args_from_decl->variadic);

		sentinel_check(
				&e->where, e,
				e->funcargs, args_from_decl->variadic,
				count_decl, stab);
	}

	/* check the subexp tree type to get the funcall attributes */
	if(func_or_builtin_attr_present(e, attr_warn_unused))
		e->freestanding = 0; /* needs use */

	if(sp && !cc1_fopt.freestanding)
		check_standard_funcs(sp, e->funcargs);
}
/*
 * Apply a constant cast's conversion: adjust `num' from the cast
 * sub-expression's type to e->tree_type, converting/truncating between
 * the integer and floating representations as required.
 */
static void fold_cast_num(expr *const e, numeric *const num)
{
	int to_fp, from_fp;

	to_fp = type_is_floating(e->tree_type);
	from_fp = type_is_floating(expr_cast_child(e)->tree_type);

	if(to_fp){
		if(from_fp){
			UCC_ASSERT(K_FLOATING(*num), "i/f mismatch types");
			/* float -> float - nothing to see here */
		}else{
			UCC_ASSERT(K_INTEGRAL(*num), "i/f mismatch types");
			/* int -> float */
			if(num->suffix & VAL_UNSIGNED){
				num->val.f = num->val.i;
			}else{
				/* force a signed conversion, long long to long double */
				num->val.f = (sintegral_t)num->val.i;
			}
		}

		/* perform the trunc */
		switch(type_primitive(e->tree_type)){
			default:
				ICE("fp expected");

#define TRUNC(cse, ty, bmask)          \
			case type_ ## cse:               \
				num->val.f = (ty)num->val.f;   \
				num->suffix = bmask;           \
				break

			TRUNC(float, float, VAL_FLOAT);
			TRUNC(double, double, VAL_DOUBLE);
			TRUNC(ldouble, long double, VAL_LDOUBLE);
#undef TRUNC
		}
		return;
	}else if(from_fp){
		UCC_ASSERT(K_FLOATING(*num), "i/f mismatch types");

		/* special case _Bool */
		if(type_is_primitive(e->tree_type, type__Bool)){
			num->val.i = !!num->val.f;
		}else{
			/* float -> int */
			num->val.i = num->val.f;
		}

		num->suffix = 0;
		/* fall through to int logic */
	}

	UCC_ASSERT(K_INTEGRAL(*num), "fp const?");

#define pv (&num->val.i)
	/* need to cast the val.i down as appropriate */
	if(type_is_primitive(e->tree_type, type__Bool)){
		*pv = !!*pv; /* analagous to out/out.c::out_normalise()'s constant case */
	}else if(!from_fp){
		*pv = convert_integral_to_integral_warn(
				*pv, e->expr->tree_type, e->tree_type,
				e->expr_cast_implicit, &e->where);
	}
#undef pv
}