/* Predicate: does `candidate` (an array type) describe the same array
 * as the details captured in the ctx_array pointed to by `ctx`?
 * Returns non-zero on a match. */
static int eq_array(type *candidate, void *ctx)
{
	struct ctx_array *c = ctx;
	consty k;

	assert(candidate->type == type_array);

	/* static/vla qualifiers must agree before anything else */
	if(candidate->bits.array.is_static != c->is_static
	|| candidate->bits.array.is_vla != c->is_vla)
	{
		return 0;
	}

	/* same size expression - covers both being [] too */
	if(candidate->bits.array.size == c->sz)
		return 1;

	/* one sized, the other unsized */
	if(!candidate->bits.array.size || !c->sz)
		return 0;

	const_fold(candidate->bits.array.size, &k);

	if(k.type != CONST_NUM){
		/* vla - just check expression equivalence */
		return candidate->bits.array.size == c->sz;
	}

	assert(K_INTEGRAL(k.bits.num));
	return c->sz_i == k.bits.num.val.i;
}
/* For a parameter declared as x[static N], warn when the argument is a
 * null pointer or an array known to hold fewer than N elements.
 * arg_decl: the formal parameter (decayed array)
 * arg_expr: the actual argument expression */
static void static_array_check(
		decl *arg_decl, expr *arg_expr)
{
	/* if ty_func is x[static %d], check counts */
	type *ty_expr = arg_expr->tree_type;
	type *ty_decl = decl_is_decayed_array(arg_decl);
	consty k_decl;

	if(!ty_decl)
		return;

	assert(ty_decl->type == type_array);
	if(!ty_decl->bits.array.is_static)
		return;

	/* want to check any pointer type */
	if(expr_is_null_ptr(arg_expr, NULL_STRICT_ANY_PTR)){
		cc1_warn_at(&arg_expr->where, attr_nonnull,
				"passing null-pointer where array expected");
		return;
	}

	if(!ty_decl->bits.array.size)
		return;

	const_fold(ty_decl->bits.array.size, &k_decl);

	if((ty_expr = type_is_decayed_array(ty_expr))){
		assert(ty_expr->type == type_array);

		if(ty_expr->bits.array.size){
			consty k_arg;

			const_fold(ty_expr->bits.array.size, &k_arg);

			/* fix: previously k_arg.bits.num was read without confirming
			 * k_arg.type == CONST_NUM, and k_decl.bits.num.val.i without
			 * K_INTEGRAL(k_decl.bits.num) - both reads are only valid for
			 * integral CONST_NUM folds; otherwise we inspect the wrong
			 * union member */
			if(k_decl.type == CONST_NUM && K_INTEGRAL(k_decl.bits.num)
			&& k_arg.type == CONST_NUM && K_INTEGRAL(k_arg.bits.num)
			&& k_arg.bits.num.val.i < k_decl.bits.num.val.i)
			{
				cc1_warn_at(&arg_expr->where, static_array_bad,
						"array of size %" NUMERIC_FMT_D
						" passed where size %" NUMERIC_FMT_D " needed",
						k_arg.bits.num.val.i,
						k_decl.bits.num.val.i);
			}
		}
	}
	/* else it's a random pointer, just be quiet */
}
/* Fold `e` to a constant and emit its value to cc1_out.
 * Aborts (assert) when the expression isn't a numeric constant,
 * or when it carries an unsigned suffix (not yet handled). */
static void print_expr_val(expr *e)
{
	consty k;

	const_fold(e, &k);

	UCC_ASSERT(k.type == CONST_NUM, "val expected");
	UCC_ASSERT((k.bits.num.suffix & VAL_UNSIGNED) == 0, "TODO: unsigned");

	if(!K_INTEGRAL(k.bits.num)){
		/* floating constant */
		fprintf(cc1_out, NUMERIC_FMT_LD, k.bits.num.val.f);
		return;
	}

	fprintf(cc1_out, NUMERIC_FMT_D, k.bits.num.val.i);
}
/* Enforce __attribute__((sentinel(i))) on a call: warn unless the i-th
 * variadic argument, counting back from the last, is a null pointer.
 * w: location warnings are attached to
 * e: the callee expression (function or builtin)
 * args: the call's argument array
 * variadic: non-zero when the callee is variadic
 * nstdargs: count of named (non-variadic) parameters
 * stab: symbol table used to fold the attribute's index expression */
static void sentinel_check(where *w, expr *e, expr **args,
		const int variadic, const int nstdargs,
		symtable *stab)
{
	/* emit the sentinel warning, then abandon the whole check */
#define ATTR_WARN_RET(w, ...) \
	do{ cc1_warn_at(w, attr_sentinel, __VA_ARGS__); return; }while(0)

	attribute *attr = func_or_builtin_attr_present(e, attr_sentinel);
	int i, nvs;
	expr *sentinel;

	if(!attr)
		return;

	if(!variadic)
		return; /* warning emitted elsewhere, on the decl */

	if(attr->bits.sentinel){
		consty k;

		FOLD_EXPR(attr->bits.sentinel, stab);
		const_fold(attr->bits.sentinel, &k);

		/* the attribute's argument must fold to an integral constant */
		if(k.type != CONST_NUM || !K_INTEGRAL(k.bits.num))
			die_at(&attr->where, "sentinel attribute not reducible to integer constant");

		i = k.bits.num.val.i;
	}else{
		/* bare sentinel attribute: index 0, i.e. the last argument */
		i = 0;
	}

	/* number of variadic arguments actually passed */
	nvs = dynarray_count(args) - nstdargs;

	if(nvs == 0)
		ATTR_WARN_RET(w, "not enough variadic arguments for a sentinel");
	UCC_ASSERT(nvs >= 0, "too few args");

	if(i >= nvs)
		ATTR_WARN_RET(w, "sentinel index is not a variadic argument");

	/* index i counts backwards from the final argument */
	sentinel = args[(nstdargs + nvs - 1) - i];

	/* must be of a pointer type, printf("%p\n", 0) is undefined */
	if(!expr_is_null_ptr(sentinel, NULL_STRICT_ANY_PTR))
		ATTR_WARN_RET(&sentinel->where,
				"sentinel argument expected (got %s)",
				type_to_str(sentinel->tree_type));

#undef ATTR_WARN_RET
}
/* Initialise a ctx_array for array-type comparison (see eq_array users).
 * Captures the size expression, static/vla flags, and - when the size
 * folds to an integral constant - its numeric value in sz_i (else 0). */
static void ctx_array_init(
		struct ctx_array *ctx,
		expr *sz, int is_static, int is_vla)
{
	ctx->sz_i = 0;
	ctx->sz = sz;
	ctx->is_static = is_static;
	ctx->is_vla = is_vla;

	if(sz){
		consty k;

		const_fold(sz, &k);

		/* fix: previously K_INTEGRAL(k.bits.num) was read without first
		 * checking k.type == CONST_NUM - k.bits is a union, and the num
		 * member is only valid for CONST_NUM folds (e.g. a VLA size
		 * wouldn't fold to a number) */
		if(k.type == CONST_NUM && K_INTEGRAL(k.bits.num))
			ctx->sz_i = k.bits.num.val.i;
	}
}
/* Compute the alignment, in bytes, of type `r`.
 * Precedence: an explicit aligned attribute wins; then struct/union,
 * pointer/block, basic type, and array (element alignment).
 * Falls back to 1 for anything else.
 * from: location used for diagnostics in btype_align. */
unsigned type_align(type *r, const where *from)
{
	attribute *align = type_attr_present(r, attr_aligned);
	struct_union_enum_st *sue;
	type *test;

	if(align){
		/* aligned(n): fold n; bare aligned means maximum alignment */
		if(!align->bits.align)
			return platform_align_max();

		{
			consty k;

			const_fold(align->bits.align, &k);
			assert(k.type == CONST_NUM && K_INTEGRAL(k.bits.num));
			return k.bits.num.val.i;
		}
	}

	/* safe - can't have an instance without a ->sue */
	sue = type_is_s_or_u(r);
	if(sue)
		return sue->align;

	if(type_is(r, type_ptr) || type_is(r, type_block))
		return platform_word_size();

	test = type_is(r, type_btype);
	if(test)
		return btype_align(test->bits.type, from);

	test = type_is(r, type_array);
	if(test)
		return type_align(test->ref, from);

	return 1;
}
/* UBSan: emit a runtime bounds assertion for ptr-arith where one side
 * is a declared array with a constant size. The index operand (the
 * non-pointer side) is checked, as unsigned, against the array size -
 * the unsigned compare also catches negative indexes.
 * No-op unless CC1_UBSAN is enabled or the size isn't constant. */
void sanitize_boundscheck(
		expr *elhs, expr *erhs,
		out_ctx *octx,
		const out_val *lhs, const out_val *rhs)
{
	decl *array_decl = NULL;
	type *array_ty;
	expr *expr_sz;
	consty sz;
	/* fix: keep val initialised - previously it was only assigned on the
	 * same branches that set array_decl, leaving it indeterminate (and
	 * warned about by compilers) on the early-return path */
	const out_val *val = NULL;

	if(!(cc1_sanitize & CC1_UBSAN))
		return;

	/* whichever side is the pointer names the array;
	 * the other side's out_val is the index to check */
	if(type_is_ptr(elhs->tree_type)){
		array_decl = expr_to_declref(elhs, NULL);
		val = rhs;
	}else if(type_is_ptr(erhs->tree_type)){
		array_decl = expr_to_declref(erhs, NULL);
		val = lhs;
	}

	if(!array_decl)
		return;

	if(!(array_ty = type_is(array_decl->ref, type_array)))
		return;

	expr_sz = array_ty->bits.array.size;
	if(!expr_sz)
		return;
	const_fold(expr_sz, &sz);

	if(sz.type != CONST_NUM)
		return;
	if(!K_INTEGRAL(sz.bits.num))
		return;

	/* force unsigned compare, which catches negative indexes */
	sanitize_assert_order(val, op_le, sz.bits.num.val.i, uintptr_ty(), octx, "bounds");
}
/* Warn when storing a constant expression into a bitfield member would
 * truncate it (or flip its sign, for signed bitfields).
 * mem: the bitfield member decl (bits.var.field_width holds the width)
 * from: the expression being stored */
void bitfield_trunc_check(decl *mem, expr *from)
{
	consty k;

	if(expr_kind(from, cast)){
		/* we'll warn about bitfield truncation, prevent warnings
		 * about cast truncation */
		from->expr_cast_implicit = 0;
	}

	const_fold(from, &k);

	if(k.type == CONST_NUM){
		const sintegral_t kexp = k.bits.num.val.i;
		/* highest may be -1 - kexp is zero */
		const int highest = integral_high_bit(k.bits.num.val.i, from->tree_type);
		const int is_signed = type_is_signed(mem->bits.var.field_width->tree_type);

		/* k is reused: from here on it holds the folded field width */
		const_fold(mem->bits.var.field_width, &k);

		UCC_ASSERT(k.type == CONST_NUM, "bitfield size not val?");
		UCC_ASSERT(K_INTEGRAL(k.bits.num), "fp bitfield size?");

		/* truncation when the highest set bit doesn't fit in the width;
		 * a signed bitfield also loses the bit at position == width
		 * (it becomes the sign bit) */
		if(highest > (sintegral_t)k.bits.num.val.i
		|| (is_signed && highest == (sintegral_t)k.bits.num.val.i))
		{
			/* mask the stored value down to the low `width` bits,
			 * to show what the bitfield will actually hold */
			sintegral_t kexp_to = kexp & ~(-1UL << k.bits.num.val.i);

			cc1_warn_at(&from->where,
					bitfield_trunc,
					"truncation in store to bitfield alters value: "
					"%" NUMERIC_FMT_D " -> %" NUMERIC_FMT_D,
					kexp, kexp_to);
		}
	}
}
/* Apply a cast's numeric conversion to a folded constant, in place.
 * Handles all four directions: int->fp, fp->int, fp->fp (with
 * truncation to the target fp width) and int->int (delegated to
 * convert_integral_to_integral_warn, which can warn on value change).
 * e: the cast expression (e->tree_type is the target type)
 * num: the folded constant of the cast's operand, updated in place */
static void fold_cast_num(expr *const e, numeric *const num)
{
	int to_fp, from_fp;

	to_fp = type_is_floating(e->tree_type);
	from_fp = type_is_floating(expr_cast_child(e)->tree_type);

	if(to_fp){
		if(from_fp){
			UCC_ASSERT(K_FLOATING(*num), "i/f mismatch types");
			/* float -> float - nothing to see here */
		}else{
			UCC_ASSERT(K_INTEGRAL(*num), "i/f mismatch types");
			/* int -> float */
			if(num->suffix & VAL_UNSIGNED){
				num->val.f = num->val.i;
			}else{
				/* force a signed conversion, long long to long double */
				num->val.f = (sintegral_t)num->val.i;
			}
		}

		/* perform the trunc - round-trip through the target's width
		 * and record its suffix */
		switch(type_primitive(e->tree_type)){
			default:
				ICE("fp expected");

#define TRUNC(cse, ty, bmask) \
			case type_ ## cse: \
				num->val.f = (ty)num->val.f; \
				num->suffix = bmask; \
				break

			TRUNC(float, float, VAL_FLOAT);
			TRUNC(double, double, VAL_DOUBLE);
			TRUNC(ldouble, long double, VAL_LDOUBLE);
#undef TRUNC
		}
		return;
	}else if(from_fp){
		UCC_ASSERT(K_FLOATING(*num), "i/f mismatch types");

		/* special case _Bool - value is normalised, not converted */
		if(type_is_primitive(e->tree_type, type__Bool)){
			num->val.i = !!num->val.f;
		}else{
			/* float -> int */
			num->val.i = num->val.f;
		}

		num->suffix = 0;

		/* fall through to int logic */
	}

	UCC_ASSERT(K_INTEGRAL(*num), "fp const?");

#define pv (&num->val.i)
	/* need to cast the val.i down as appropriate */
	if(type_is_primitive(e->tree_type, type__Bool)){
		*pv = !!*pv; /* analagous to out/out.c::out_normalise()'s constant case */
	}else if(!from_fp){
		/* int -> int: narrow/widen, warning on value change for
		 * implicit casts */
		*pv = convert_integral_to_integral_warn(
				*pv,
				e->expr->tree_type,
				e->tree_type,
				e->expr_cast_implicit,
				&e->where);
	}
#undef pv
}