/* Compute the step for pointer arithmetic on t, i.e. sizeof(*t).
 *
 * Returns 1 for void* (GNU extension: sizeof(void) == 1) and for
 * unknown types, and -1 when the pointee is a VLA, whose size isn't
 * known statically.
 */
static int calc_ptr_step(type *t)
{
	type *pointee;

	/* void* arithmetic steps by sizeof(void) */
	if(type_is_primitive(type_is_ptr(t), type_void))
		return type_primitive_size(type_void);

	if(type_is_primitive(t, type_unknown))
		return 1;

	pointee = type_next(t);

	/* VLA sizes are runtime values - signal with -1 */
	if(type_is_vla(pointee, VLA_ANY_DIMENSION))
		return -1;

	return type_size(pointee, NULL);
}
/* Classify r as a string type: a char string, a wide (int-element)
 * string, or not a string at all.
 *
 * Looks through one level of array, or failing that one level of
 * pointer, then inspects the element's primitive type.
 */
enum type_str_type type_str_type(type *r)
{
	type *elem;

	/* strings appear as arrays, or as pointers after decay */
	elem = type_is_array(r);
	if(!elem)
		elem = type_is_ptr(r);

	elem = type_is_primitive(elem, type_unknown);
	if(!elem)
		return type_str_no;

	switch(elem->bits.type->primitive){
		case type_schar:
		case type_nchar:
		case type_uchar:
			return type_str_char;

		case type_int:
			/* wide strings - elements are int-sized here */
			return type_str_wchar;

		default:
			return type_str_no;
	}
}
/* Decide whether e is a null pointer constant.
 *
 * 6.3.2.3:
 *
 * An integer constant expression with the value 0, or such an expression
 * cast to type void *, is called a null pointer constant
 *
 * NULL_STRICT_ANY_PTR is used for sentinel checks,
 * i.e. any null type pointer
 */
int expr_is_null_ptr(expr *e, enum null_strictness ty)
{
	int type_ok =
		/* void * always qualifies */
		   !!type_is_primitive(type_is_ptr(e->tree_type), type_void)
		|| (ty == NULL_STRICT_INT && type_is_integral(e->tree_type))
		|| (ty == NULL_STRICT_ANY_PTR && type_is_ptr(e->tree_type));

	/* the type must be acceptable AND the value constant-zero */
	return type_ok && const_expr_and_zero(e);
}
/* Semantic analysis ("fold") for an assignment expression e (lhs = rhs).
 *
 * Responsibilities, in order:
 *   - fold both sides without lvalue decay;
 *   - reject assignment from a void expression (error, recover as int);
 *   - enforce lvalue-ness and (unless this is an init) const-ness of lhs;
 *   - set e->tree_type to the unqualified lhs type;
 *   - type-check/insert a cast on rhs;
 *   - check bitfield truncation for struct-member targets;
 *   - rewrite struct assignment as a builtin memcpy.
 *
 * Side effects: may set fold_had_error, adjusts lhs_sym->nreads, and for
 * struct copies installs backend hooks (f_gen / f_islval) on e.
 */
void fold_expr_assign(expr *e, symtable *stab)
{
	sym *lhs_sym = NULL;
	int is_struct_cpy = 0;

	/* note whether lhs is a plain symbol, so its read-count can be fixed up */
	lhs_sym = fold_inc_writes_if_sym(e->lhs, stab);

	fold_expr_nodecay(e->lhs, stab);
	fold_expr_nodecay(e->rhs, stab);

	if(lhs_sym)
		lhs_sym->nreads--; /* cancel the read that fold_ident thinks it got */

	is_struct_cpy = !!type_is_s_or_u(e->lhs->tree_type);

	/* struct copies keep the rhs as an lvalue (memcpy source below) */
	if(!is_struct_cpy)
		FOLD_EXPR(e->rhs, stab); /* lval2rval the rhs */

	if(type_is_primitive(e->rhs->tree_type, type_void)){
		/* error recovery: give e a valid type so folding can continue */
		fold_had_error = 1;
		warn_at_print_error(&e->where, "assignment from void expression");
		e->tree_type = type_nav_btype(cc1_type_nav, type_int);
		return;
	}

	expr_must_lvalue(e->lhs, "assignment");

	/* initialisation of a const object is allowed; re-assignment isn't */
	if(!e->assign_is_init)
		expr_assign_const_check(e->lhs, &e->where);

	fold_check_restrict(e->lhs, e->rhs, "assignment", &e->where);

	/* this makes sense, but it's also critical for code-gen:
	 * if we assign to a volatile lvalue, we don't want the volatile-ness
	 * to propagate, as we are now an rvalue, and don't want our value read
	 * as we decay */
	e->tree_type = type_unqualify(e->lhs->tree_type);

	/* type check */
	fold_type_chk_and_cast_ty(
			e->lhs->tree_type, &e->rhs,
			stab, &e->where, "assignment");

	/* the only way to get a value into a bitfield (aside from memcpy / indirection) is via this
	 * hence we're fine doing the truncation check here */
	{
		decl *mem;

		if(expr_kind(e->lhs, struct)
		&& (mem = e->lhs->bits.struct_mem.d) /* maybe null from s->non_present_memb */
		&& mem->bits.var.field_width)
		{
			bitfield_trunc_check(mem, e->rhs);
		}
	}

	if(is_struct_cpy){
		/* lower the struct assignment to a memcpy of the rhs's size */
		e->expr = builtin_new_memcpy(
				e->lhs, e->rhs,
				type_size(e->rhs->tree_type, &e->rhs->where));

		FOLD_EXPR(e->expr, stab);

		/* set is_lval, so we can participate in struct-copy chains
		 * FIXME: don't interpret as an lvalue, e.g. (a = b) = c;
		 * this is currently special cased in expr_is_lval()
		 *
		 * CHECK THIS */
		if(cc1_backend == BACKEND_ASM)
			e->f_gen = lea_assign_lhs;
		e->f_islval = expr_is_lval_struct;
	}
}
/* Emit the static initialiser for an object of type tfor into section sec.
 *
 * Recursive over the type: dispatches on struct / array / union / scalar.
 * A NULL (or DYNARRAY_NULL) init means zero-initialise, emitted as padding.
 * Bitfield members are accumulated into groups and flushed together via
 * bitfields_out(), since several fields share one storage unit.
 */
static void asm_declare_init(enum section_type sec, decl_init *init, type *tfor)
{
	type *r;

	if(init == DYNARRAY_NULL)
		init = NULL;

	if(!init){
		/* don't initialise flex-arrays */
		if(!type_is_incomplete_array(tfor)){
			asm_declare_pad(sec, type_size(tfor, NULL),
					"null init"/*, type_to_str(tfor)*/);
		}else{
			asm_out_section(sec, ASM_COMMENT " flex array init skipped\n");
		}

	}else if((r = type_is_primitive(tfor, type_struct))){
		/* array of stmts for each member
		 * assumes the ->bits.inits order is member order */
		struct_union_enum_st *const sue = r->bits.type->sue;
		sue_member **mem;
		decl_init **i;
		unsigned end_of_last = 0; /* byte offset just past the last member emitted */
		struct bitfield_val *bitfields = NULL; /* current pending bitfield group */
		unsigned nbitfields = 0;
		decl *first_bf = NULL; /* first member of the current bitfield group */
		expr *copy_from_exp;

		UCC_ASSERT(init->type == decl_init_brace, "unbraced struct");

#define DEBUG(s, ...) /*fprintf(f, "\033[35m" s "\033[m\n", __VA_ARGS__)*/

		i = init->bits.ar.inits;

		/* check for compound-literal copy-init */
		if((copy_from_exp = decl_init_is_struct_copy(init, sue))){
			decl_init *copy_from_init;

			copy_from_exp = expr_skip_lval2rval(copy_from_exp);

			/* the only struct-expression that's possible
			 * in static context is a compound literal */
			assert(expr_kind(copy_from_exp, compound_lit)
					&& "unhandled expression init");

			/* substitute the literal's own brace-init for ours */
			copy_from_init = copy_from_exp->bits.complit.decl->bits.var.init.dinit;
			assert(copy_from_init->type == decl_init_brace);

			i = copy_from_init->bits.ar.inits;
		}

		/* iterate using members, not inits */
		for(mem = sue->members; mem && *mem; mem++){
			decl *d_mem = (*mem)->struct_member;
			decl_init *di_to_use = NULL;

			/* pick the matching init entry, if any remain;
			 * DYNARRAY_NULL entries are placeholders (member left zero) */
			if(i){
				int inc = 1;

				if(*i == NULL)
					inc = 0;
				else if(*i != DYNARRAY_NULL)
					di_to_use = *i;

				if(inc){
					i++;
					if(!*i)
						i = NULL; /* reached end */
				}
			}

			DEBUG("init for %ld/%s, %s",
					mem - sue->members, d_mem->spel,
					di_to_use ? di_to_use->bits.expr->f_str() : NULL);

			/* only pad if we're not on a bitfield or we're on the first bitfield */
			if(!d_mem->bits.var.field_width || !first_bf){
				DEBUG("prev padding, offset=%d, end_of_last=%d",
						d_mem->struct_offset, end_of_last);

				UCC_ASSERT(
						d_mem->bits.var.struct_offset >= end_of_last,
						"negative struct pad, sue %s, member %s "
						"offset %u, end_of_last %u",
						sue->spel, decl_to_str(d_mem),
						d_mem->bits.var.struct_offset, end_of_last);

				asm_declare_pad(sec,
						d_mem->bits.var.struct_offset - end_of_last,
						"prev struct padding");
			}

			if(d_mem->bits.var.field_width){
				if(!first_bf || d_mem->bits.var.first_bitfield){
					if(first_bf){
						DEBUG("new bitfield group (%s is new boundary), old:",
								d_mem->spel);
						/* next bitfield group - store the current */
						bitfields_out(sec, bitfields, &nbitfields, first_bf->ref);
					}
					first_bf = d_mem;
				}

				/* defer emission - collect into the current group */
				bitfields = bitfields_add(
						bitfields, &nbitfields,
						d_mem, di_to_use);
			}else{
				if(nbitfields){
					DEBUG("at non-bitfield, prev-bitfield out:", 0);
					bitfields_out(sec, bitfields, &nbitfields, first_bf->ref);
					first_bf = NULL;
				}

				DEBUG("normal init for %s:", d_mem->spel);
				/* recurse for the member itself */
				asm_declare_init(sec, di_to_use, d_mem->ref);
			}

			if(type_is_incomplete_array(d_mem->ref)){
				UCC_ASSERT(!mem[1], "flex-arr not at end");
			}else if(!d_mem->bits.var.field_width
					|| d_mem->bits.var.first_bitfield)
			{
				/* advance end_of_last past this member (or bitfield unit) */
				unsigned last_sz = type_size(d_mem->ref, NULL);
				end_of_last = d_mem->bits.var.struct_offset + last_sz;
				DEBUG("done with member \"%s\", end_of_last = %d",
						d_mem->spel, end_of_last);
			}
		}

		/* flush a trailing bitfield group, if the struct ended on one */
		if(nbitfields)
			bitfields_out(sec, bitfields, &nbitfields, first_bf->ref);
		free(bitfields);

		/* need to pad to struct size */
		asm_declare_pad(sec,
				sue_size(sue, NULL) - end_of_last,
				"struct tail");

	}else if((r = type_is(tfor, type_array))){
		size_t i, len;
		decl_init **p;
		type *next = type_next(tfor);

		UCC_ASSERT(init->type == decl_init_brace, "unbraced struct");

		/* incomplete arrays take their length from the initialiser */
		if(type_is_incomplete_array(tfor)){
			len = dynarray_count(init->bits.ar.inits);
		}else{
			UCC_ASSERT(type_is_complete(tfor), "incomplete array/type init");
			len = type_array_len(tfor);
		}

		/* once the inits run out, *p is NULL and we keep emitting
		 * NULL (zero) element inits until len is exhausted */
		for(i = len, p = init->bits.ar.inits;
				i > 0;
				i--)
		{
			decl_init *this = NULL;

			if(*p){
				this = *p++;

				if(this != DYNARRAY_NULL && this->type == decl_init_copy){
					/*fprintf(f, "# copy from %lu\n", DECL_INIT_COPY_IDX(this, init));*/
					struct init_cpy *icpy = *this->bits.range_copy;
					/* resolve the copy */
					this = icpy->range_init;
				}
			}

			asm_declare_init(sec, this, next);
		}

	}else if((r = type_is_primitive(tfor, type_union))){
		/* union inits are decl_init_brace with spaces up to the first union init,
		 * then NULL/end of the init-array */
		struct_union_enum_st *sue = type_is_s_or_u(r);
		unsigned i, sub = 0;
		decl_init *u_init;

		UCC_ASSERT(init->type == decl_init_brace, "brace init expected");

		/* skip the empties until we get to one */
		for(i = 0; init->bits.ar.inits[i] == DYNARRAY_NULL; i++);

		if((u_init = init->bits.ar.inits[i])){
			decl *mem = sue->members[i]->struct_member;
			type *mem_r = mem->ref;

			/* union init, member at index `i' */
			if(mem->bits.var.field_width){
				/* we know it's integral */
				struct bitfield_val bfv;

				ASSERT_SCALAR(u_init);

				bitfield_val_set(&bfv,
						u_init->bits.expr,
						mem->bits.var.field_width);

				asm_declare_init_bitfields(sec, &bfv, 1, mem_r);
			}else{
				asm_declare_init(sec, u_init, mem_r);
			}

			sub = type_size(mem_r, NULL);
		} /* else null union init */

		/* pad out to the union's full size past the initialised member */
		asm_declare_pad(sec,
				type_size(r, NULL) - sub,
				"union extra");

	}else{
		/* scalar */
		expr *exp = init->bits.expr;

		UCC_ASSERT(init->type == decl_init_scalar, "scalar init expected");

		/* exp->tree_type should match tfor */
		{
			char buf[TYPE_STATIC_BUFSIZ];

			UCC_ASSERT(
					type_cmp(exp->tree_type, tfor, TYPE_CMP_ALLOW_TENATIVE_ARRAY)
					!= TYPE_NOT_EQUAL,
					"mismatching init types: %s and %s",
					type_to_str_r(buf, exp->tree_type),
					type_to_str(tfor));
		}

		/* use tfor, since "abc" has type (char[]){(int)'a', (int)'b', ...} */
		DEBUG(" scalar init for %s:", type_to_str(tfor));
		static_val(sec, tfor, exp);
	}
}
/* Recursive type comparison: classify the relation between orig_a and
 * orig_b (equal / equal-via-typedef / implicitly or explicitly
 * convertible / qualifier changes / not equal).
 *
 * The direction is b -> a, i.e. "can a value of type b be used where a
 * is expected" (see the _Bool <- pointer and void <- anything cases).
 * Comparison works on the skip_all'd types; qualifiers are compared on
 * the originals at the end.
 */
static enum type_cmp type_cmp_r(
		type *const orig_a,
		type *const orig_b,
		enum type_cmp_opts opts)
{
	enum type_cmp ret;
	type *a, *b;
	int subchk = 1; /* whether to recurse into ->ref to compare sub-types */

	if(!orig_a || !orig_b)
		return orig_a == orig_b ? TYPE_EQUAL : TYPE_NOT_EQUAL;

	a = type_skip_all(orig_a);
	b = type_skip_all(orig_b);

	/* array/func decay takes care of any array->ptr checks */
	if(a->type != b->type){
		/* allow _Bool <- pointer */
		if(type_is_primitive(a, type__Bool) && type_is_ptr(b))
			return TYPE_CONVERTIBLE_IMPLICIT;

		/* allow int <-> ptr (or block) */
		if((type_is_ptr_or_block(a) && type_is_integral(b))
		|| (type_is_ptr_or_block(b) && type_is_integral(a)))
		{
			return TYPE_CONVERTIBLE_EXPLICIT;
		}

		/* allow void <- anything */
		if(type_is_void(a))
			return TYPE_CONVERTIBLE_IMPLICIT;

		/* allow block <-> fnptr */
		if((type_is_fptr(a) && type_is(b, type_block))
		|| (type_is_fptr(b) && type_is(a, type_block)))
		{
			return TYPE_CONVERTIBLE_EXPLICIT;
		}

		return TYPE_NOT_EQUAL;
	}

	/* same node kind - compare the node itself, then (usually) recurse */
	switch(a->type){
		case type_auto:
			ICE("__auto_type");

		case type_btype:
			/* leaf: btype_cmp decides, no sub-type to recurse into */
			subchk = 0;
			ret = btype_cmp(a->bits.type, b->bits.type);
			break;

		case type_array:
			if(a->bits.array.is_vla || b->bits.array.is_vla){
				/* fine, pretend they're equal even if different expressions */
				/* NOTE(review): this ret is overwritten below by the
				 * recursive element-type compare (subchk is still 1) -
				 * presumably intentional, but worth confirming */
				ret = TYPE_EQUAL_TYPEDEF;
			}else{
				const int a_has_sz = !!a->bits.array.size;
				const int b_has_sz = !!b->bits.array.size;

				if(a_has_sz && b_has_sz){
					integral_t av = const_fold_val_i(a->bits.array.size);
					integral_t bv = const_fold_val_i(b->bits.array.size);

					if(av != bv)
						return TYPE_NOT_EQUAL;
				}else if(a_has_sz != b_has_sz){
					/* one array is tentative ([]) - only ok if allowed */
					if((opts & TYPE_CMP_ALLOW_TENATIVE_ARRAY) == 0)
						return TYPE_NOT_EQUAL;
				}
			}

			/* next */
			break;

		case type_block:
		case type_ptr:
			break;

		case type_cast:
		case type_tdef:
		case type_attr:
		case type_where:
			/* type_skip_all() above removes these */
			ICE("should've been skipped");

		case type_func:
			switch(funcargs_cmp(a->bits.func.args, b->bits.func.args)){
				case FUNCARGS_EXACT_EQUAL:
				case FUNCARGS_IMPLICIT_CONV:
					break;
				default:
					/* "void (int)" and "void (int, int)" aren't equal,
					 * but a cast can soon fix it */
					return TYPE_CONVERTIBLE_EXPLICIT;
			}
			break;
	}

	if(subchk)
		ret = type_cmp_r(a->ref, b->ref, opts);

	if(ret == TYPE_NOT_EQUAL
	&& a->type == type_func)
	{
		/* "int (int)" and "void (int)" aren't equal - but castable */
		ret = TYPE_CONVERTIBLE_EXPLICIT;
	}

	if(ret == TYPE_NOT_EQUAL
	&& a->type == type_ptr
	&& fopt_mode & FOPT_PLAN9_EXTENSIONS)
	{
		/* allow b to be an anonymous member of a, if pointers */
		struct_union_enum_st *a_sue = type_is_s_or_u(a),
		                     *b_sue = type_is_s_or_u(b);

		if(a_sue && b_sue /* already know they aren't equal */){
			/* b_sue has an a_sue,
			 * the implicit cast adjusts to return said a_sue */
			if(struct_union_member_find_sue(b_sue, a_sue))
				return TYPE_CONVERTIBLE_IMPLICIT;
		}
	}

	/* allow ptr <-> ptr */
	if(ret == TYPE_NOT_EQUAL
	&& type_is_ptr(a) && type_is_ptr(b))
	{
		ret = TYPE_CONVERTIBLE_EXPLICIT;
	}

	/* char * and int * are explicitly conv.,
	 * even though char and int are implicit */
	if(ret == TYPE_CONVERTIBLE_IMPLICIT
	&& a->type == type_ptr)
	{
		ret = TYPE_CONVERTIBLE_EXPLICIT;
	}

	/* qualifier differences found below a pointer/block are promoted:
	 * a direct qual change becomes a pointed-to change, and a
	 * pointed-to change becomes a nested change */
	if(a->type == type_ptr || a->type == type_block){
		switch(ret){
#define MAP(a, b) case a: ret = b; break
			MAP(TYPE_QUAL_ADD, TYPE_QUAL_POINTED_ADD);
			MAP(TYPE_QUAL_SUB, TYPE_QUAL_POINTED_SUB);
			MAP(TYPE_QUAL_POINTED_ADD, TYPE_QUAL_NESTED_CHANGE);
			MAP(TYPE_QUAL_POINTED_SUB, TYPE_QUAL_NESTED_CHANGE);
#undef MAP
			default:
				break;
		}
	}

	/* types compare equal - now account for top-level qualifiers */
	if(ret & TYPE_EQUAL_ANY){
		enum type_qualifier a_qual = type_qual(orig_a);
		enum type_qualifier b_qual = type_qual(orig_b);

		if(a_qual && b_qual){
			switch(type_qual_cmp(a_qual, b_qual)){
				case -1:
					/* a has more */
					ret = TYPE_QUAL_ADD;
					break;
				case 1:
					/* b has more */
					ret = TYPE_QUAL_SUB;
					break;
			}
		}else if(a_qual){
			ret = TYPE_QUAL_ADD;
		}else if(b_qual){
			ret = TYPE_QUAL_SUB;
		} /* else neither are casts */
	}

	/* equal types reached through different typedefs are still
	 * distinguished, for diagnostics */
	if(ret == TYPE_EQUAL){
		int at = orig_a->type == type_tdef;
		int bt = orig_b->type == type_tdef;

		if(at != bt){
			/* one is a typedef */
			ret = TYPE_EQUAL_TYPEDEF;
		}else if(at){
			/* both typedefs */
			if(orig_a->bits.tdef.decl != orig_b->bits.tdef.decl){
				ret = TYPE_EQUAL_TYPEDEF;
			}
		}
		/* else no typedefs */
	}

	return ret;
}
/* Append the textual name of r's underlying type (its btype, or the
 * first typedef encountered) to *bufp via BUF_ADD.
 *
 * Returns the typedef node when the name printed was a typedef's
 * (so the caller can continue describing from there), otherwise NULL.
 * NOTE(review): *sz is presumably consumed by the BUF_ADD macro along
 * with *bufp - confirm against the macro's definition.
 */
static type *type_add_type_str(type *r,
		char **bufp, int *sz,
		enum type_str_opts const opts)
{
	/* go down to the first type or typedef, print it and then its descriptions */
	type *ty;

	**bufp = '\0';

	/* walk towards the btype; stop early at a typedef unless suppressed */
	for(ty = r;
			ty && ty->type != type_btype;
			ty = ty->ref)
	{
		if((opts & TY_STR_NO_TDEF) == 0 && ty->type == type_tdef)
			break;
	}

	if(!ty)
		return NULL;

	if(ty->type == type_tdef){
		char buf[BTYPE_STATIC_BUFSIZ];
		decl *d = ty->bits.tdef.decl;
		type *of;

		if(d){
			/* a named typedef - print its spelling */
			BUF_ADD("%s", d->spel);
			of = d->ref;

		}else{
			/* typeof(...) - either of a type or of an expression */
			expr *const e = ty->bits.tdef.type_of;
			int const is_type = !e->expr;

			BUF_ADD("typeof(%s%s)",
					/* e is always expr_sizeof() */
					is_type ? "" : "expr: ",
					is_type ? type_to_str_r_spel_opts(buf, e->tree_type, NULL, TY_STR_NOOPT)
					        : e->expr->f_str());

			/* don't show aka for typeof types - it's there already */
			of = is_type ? NULL : e->tree_type;
		}

		if((opts & TY_STR_AKA) && of){
			/* descend to the type if it's next */
			type *t_ref = type_is_primitive(of, type_unknown);
			const btype *t = t_ref ? t_ref->bits.type : NULL;

			BUF_ADD(" (aka '%s')",
					t ? btype_to_str(t)
					: type_to_str_r_spel_opts(buf, type_skip_tdefs(of), NULL, TY_STR_NOOPT));
		}

		return ty;
	}else{
		/* plain btype - print its name directly */
		BUF_ADD("%s", btype_to_str(ty->bits.type));
	}

	return NULL;
}
/* Constant-fold a cast: convert the numeric constant *num from the
 * cast child's type to e->tree_type, in place.
 *
 * Handles int -> float, float -> float (with truncation to the target
 * precision), float -> int (with the _Bool special case), and
 * int -> int narrowing via convert_integral_to_integral_warn(), which
 * may emit a truncation warning at e->where.
 */
static void fold_cast_num(expr *const e, numeric *const num)
{
	int to_fp, from_fp;

	to_fp = type_is_floating(e->tree_type);
	from_fp = type_is_floating(expr_cast_child(e)->tree_type);

	if(to_fp){
		if(from_fp){
			UCC_ASSERT(K_FLOATING(*num), "i/f mismatch types");
			/* float -> float - nothing to see here */
		}else{
			UCC_ASSERT(K_INTEGRAL(*num), "i/f mismatch types");
			/* int -> float */
			if(num->suffix & VAL_UNSIGNED){
				num->val.f = num->val.i;
			}else{
				/* force a signed conversion, long long to long double */
				num->val.f = (sintegral_t)num->val.i;
			}
		}

		/* perform the trunc */
		switch(type_primitive(e->tree_type)){
			default:
				ICE("fp expected");

				/* round-trip through the target's precision and tag the
				 * value with the matching float-kind suffix */
#define TRUNC(cse, ty, bmask)          \
			case type_ ## cse:               \
				num->val.f = (ty)num->val.f;   \
				num->suffix = bmask;           \
				break

			TRUNC(float, float, VAL_FLOAT);
			TRUNC(double, double, VAL_DOUBLE);
			TRUNC(ldouble, long double, VAL_LDOUBLE);
#undef TRUNC
		}
		return;
	}else if(from_fp){
		UCC_ASSERT(K_FLOATING(*num), "i/f mismatch types");

		/* special case _Bool */
		if(type_is_primitive(e->tree_type, type__Bool)){
			num->val.i = !!num->val.f;
		}else{
			/* float -> int */
			num->val.i = num->val.f;
		}

		/* clear the float-kind bits; the value is now integral */
		num->suffix = 0;
		/* fall through to int logic */
	}

	UCC_ASSERT(K_INTEGRAL(*num), "fp const?");

#define pv (&num->val.i)
	/* need to cast the val.i down as appropriate */
	if(type_is_primitive(e->tree_type, type__Bool)){
		*pv = !!*pv; /* analogous to out/out.c::out_normalise()'s constant case */
	}else if(!from_fp){
		/* int -> int: narrow/extend, warning on truncation.
		 * skipped when from_fp: the float->int conversion above already
		 * produced the final value */
		*pv = convert_integral_to_integral_warn(
				*pv, e->expr->tree_type,
				e->tree_type,
				e->expr_cast_implicit,
				&e->where);
	}
#undef pv
}