/*
 * Return a fresh constant of primitive or scalar type tau.
 * Returns null_value when every constant of a finite scalar type
 * has already been created.
 */
value_t make_fresh_const(fresh_val_maker_t *maker, type_t tau) {
  tuple_counter_t *counter;
  uint32_t idx, card;
  value_t result;

  assert(is_uninterpreted_type(maker->types, tau) ||
         is_scalar_type(maker->types, tau));

  /*
   * If tau is uninterpreted, counter->card is UINT32_MAX, which is
   * larger than the maximal number of objects maker->vtbl can hold,
   * so the scalar-type code path below covers both cases uniformly.
   */
  counter = get_type_counter(maker, tau);
  assert(is_scalar_type(maker->types, tau) || counter->card == UINT32_MAX);

  card = counter->card;

  /* skip over indices already used by existing constants of type tau */
  for (idx = counter->count; idx < card; idx++) {
    if (!vtbl_test_const(maker->vtbl, tau, idx)) break;
  }

  if (idx >= card) {
    /* the scalar type is exhausted: no fresh constant exists */
    result = null_value;
    counter->count = idx;
  } else {
    result = vtbl_mk_const(maker->vtbl, tau, idx, NULL);
    counter->count = idx + 1;
  }

  /* an uninterpreted type can never run out of fresh constants */
  assert(is_scalar_type(maker->types, tau) || result != null_value);

  return result;
}
// TODO: Return the value or store it? llvm::Value* Generator::gen(Default_init const* e) { Type const* t = e->type(); llvm::Type* type = get_type(t); // Scalar types should get a 0 value in the // appropriate type. if (is_scalar_type(t)) return llvm::ConstantInt::get(type, 0); // Aggregate types are zero initialized. // // NOTE: This isn't actually correct. Aggregate types // should be memberwise default initialized. if (is_aggregate_type(t)) return llvm::ConstantAggregateZero::get(type); throw std::runtime_error("unhahndled default initializer"); }
static Const const_val(Symbol obj)						/*;const_val*/
{
	/* Return the constant value of the object if it has one; else return om.
	 * The constant value of a literal is obtained from the literal map of
	 * its type.  The constant value of a user-defined constant is derived
	 * from its SIGNATURE, when that holds a constant (as_ivalue) node.
	 */
	Tuple sig;

	if (cdebug2 > 3) TO_ERRFILE("const_val");

	/* literals: look the value up in the literal map of their type */
	if (is_literal(obj))
		return eval_lit_map(obj);

	sig = SIGNATURE(obj);
	if (!is_constant(obj)
	  || !is_scalar_type(TYPE_OF(obj))
	  || N_KIND((Node)sig) != as_ivalue) {
		/* TBSL: could be static but not constant folded yet. */
		return const_new(CONST_OM);
	}
	return (Const) N_VAL((Node)sig);
}
/*
 * Rename and/or retype the entry stored under <key>/<type> to
 * <nkey>/<ntype>.  Returns 0 on success, -1 on error with errno set:
 *   EINVAL  a is NULL
 *   ENOENT  key not found (stale index entry is removed)
 *   EPERM   source type does not match, or target type change not permitted
 */
int harg_renamet (harglst *a, hargkey_t *key, hargtype_t type,
                  hargkey_t *nkey, hargtype_t ntype)
{
  harg **S, **R, *r ;
  int same_keys, klen = klen_by_type (type);

  /* sanity check */
  if (a == 0) {
    errno = EINVAL;
    return -1;
  }

  R = (harg**)find_hlst (a->x, key, klen);
  if ((r = *R) == 0) {
    /* zombie, should not happen, anyway */
    delete_hlst (a->x, key, klen);
    errno = ENOENT ;
    return -1;
  }

  /* check for a specific source type */
  if (falsify_given_type (type, r->type)) {
    errno = EPERM ;
    return -1;
  }

  /* keys are "the same" when no new key was given, or when old and new
     key compare equal under their respective key representation
     (pointer keys by raw bytes, string keys by strcmp) */
  same_keys =
    nkey == 0 ||
    (is_ptrkey_type (type) && is_ptrkey_type (ntype) &&
     memcmp (key, nkey, sizeof (void*)) == 0) ||
    (is_ptrkey_type (type) == 0 && is_ptrkey_type (ntype) == 0 &&
     strcmp (key, nkey) == 0)
    ? 1 : 0 ;

  if (r->type == ntype && same_keys)
    return 0 ; /* nothing to do */

  /* check target type groups: a change is permitted within the blob
     group, within the scalar group, or towards an unspecific type.
     FIX: parenthesized the mixed &&/|| chain to make the intended
     grouping explicit (same evaluation order as before, silences
     -Wparentheses). */
  if ((is_blob_type (ntype) && is_blob_type (r->type)) ||
      (is_scalar_type (ntype) && is_scalar_type (r->type)) ||
      is_specific_type (ntype) == 0) {

    if (same_keys == 0) {
      /* make new index, move the entry over, drop the old index */
      if ((S = (harg**)make_hlst (a->x, nkey, klen_by_type (ntype))) == 0)
        return -1;
      *S = *R;
      *R = 0;
      delete_hlst (a->x, key, klen);
    }

    /* only adopt the new type if it is a specific one */
    if (is_specific_type (ntype))
      r->type = ntype ;
    return 0;
  }

  errno = EPERM; /* not permitted */
  return -1 ;
}
// Convert a byte_update2t expression to an SMT AST: replace one byte of a
// scalar source value (at byte data.source_offset) with data.update_value.
// Handles both symbolic offsets (mask/shift/or arithmetic) and constant
// offsets (extract/concat of the surrounding bits).
smt_astt smt_convt::convert_byte_update(const expr2tc &expr)
{
  const byte_update2t &data = to_byte_update2t(expr);

  assert(is_scalar_type(data.source_value) && "Byte update only works on "
                                              "scalar variables now");

  if (!is_constant_int2t(data.source_offset)) {
    // Symbolic offset path: work on the source as a bitvector and splice the
    // update byte in with mask / shift / or arithmetic.
    expr2tc source = data.source_value;
    unsigned int src_width = source->type->get_width();
    if (!is_bv_type(source))
      source = typecast2tc(get_uint_type(src_width), source);

    expr2tc offs = data.source_offset;

    // Endian-ness: if we're in non-"native" endian-ness mode, then flip the
    // offset distance. The rest of these calculations will still apply.
    if (data.big_endian) {
      auto data_size = type_byte_size(*source->type);
      constant_int2tc data_size_expr(source->type, data_size - 1);
      sub2tc sub(source->type, data_size_expr, offs);
      offs = sub;
    }

    // Widen offset / update to the source width where necessary.
    if (offs->type->get_width() != src_width)
      offs = typecast2tc(get_uint_type(src_width), offs);

    expr2tc update = data.update_value;
    if (update->type->get_width() != src_width)
      update = typecast2tc(get_uint_type(src_width), update);

    // The approach: mask, shift and or. XXX, byte order?
    // Massively inefficient.
    expr2tc eight = constant_int2tc(get_uint_type(src_width), BigInt(8));
    expr2tc effs = constant_int2tc(eight->type, BigInt(255));
    // Convert byte offset to bit offset.
    offs = mul2tc(eight->type, offs, eight);

    // Punch a byte-sized hole in the source: source & ~(0xFF << offs).
    expr2tc shl = shl2tc(offs->type, effs, offs);
    expr2tc noteffs = bitnot2tc(effs->type, shl);
    source = bitand2tc(source->type, source, noteffs);

    // Shift the update byte into position and or it into the hole.
    expr2tc shl2 = shl2tc(offs->type, update, offs);
    return convert_ast(bitor2tc(offs->type, shl2, source));
  }

  // We are merging two values: an 8 bit update value, and a larger source
  // value that we will have to merge it into. Start off by collecting
  // information about the source values and their widths.
  assert(is_number_type(data.source_value->type) &&
         "Byte update of unsupported data type");

  smt_astt value, src_value;
  unsigned int width_op0, width_op2, src_offset;

  value = convert_ast(data.update_value);
  src_value = convert_ast(data.source_value);

  width_op2 = data.update_value->type->get_width();
  width_op0 = data.source_value->type->get_width();
  src_offset = to_constant_int2t(data.source_offset).constant_value.to_ulong();

  // Flip location if we're in big-endian mode
  if (data.big_endian) {
    unsigned int data_size =
      type_byte_size(*data.source_value->type).to_ulong() - 1;
    src_offset = data_size - src_offset;
  }

  if (int_encoding) {
    std::cerr << "Can't byte update in integer mode; rerun in bitvector mode"
              << std::endl;
    abort();
  }

  // Assert some of our assumptions, which broadly mean that we'll only work
  // on bytes that are going into non-byte words.
  assert(width_op2 == 8 && "Can't byte update non-byte operations");
  assert(width_op2 != width_op0 && "Can't byte update bytes, sorry");

  smt_astt top, middle, bottom;

  // Build in three parts: the most significant bits, any in the middle, and
  // the bottom, of the reconstructed / merged output. There might not be a
  // middle if the update byte is at the top or the bottom.
  unsigned int top_of_update = (8 * src_offset) + 8;   // bit above updated byte
  unsigned int bottom_of_update = (8 * src_offset);    // bottom bit of the byte

  if (top_of_update == width_op0) {
    // Updated byte is the most significant byte: it IS the top part.
    top = value;
  } else {
    // Top part = the source bits above the updated byte.
    smt_sortt s = mk_sort(SMT_SORT_BV, width_op0 - top_of_update, false);
    top = mk_extract(src_value, width_op0 - 1, top_of_update, s);
  }

  // If the update byte was not consumed as the top part, it sits in the
  // middle between top and bottom.
  if (top == value) {
    middle = NULL;
  } else {
    middle = value;
  }

  if (src_offset == 0) {
    // Updated byte is the least significant byte: it IS the bottom part,
    // so there is no middle either.
    middle = NULL;
    bottom = value;
  } else {
    // Bottom part = the source bits below the updated byte.
    smt_sortt s = mk_sort(SMT_SORT_BV, bottom_of_update, false);
    bottom = mk_extract(src_value, bottom_of_update - 1, 0, s);
  }

  // Concatenate the top and bottom, and possible middle, together.
  smt_astt concat;
  if (middle != NULL) {
    // concat(top, middle) has width (width_op0 - top_of_update) + 8
    // == width_op0 - bottom_of_update.
    smt_sortt s = mk_sort(SMT_SORT_BV, width_op0 - bottom_of_update, false);
    concat = mk_func_app(s, SMT_FUNC_CONCAT, top, middle);
  } else {
    concat = top;
  }

  return mk_func_app(src_value->sort, SMT_FUNC_CONCAT, concat, bottom);
}
// Convert a byte_extract2t expression to an SMT AST: select the byte at
// byte position data.source_offset (possibly symbolic) out of a scalar
// source value.
smt_astt smt_convt::convert_byte_extract(const expr2tc &expr)
{
  const byte_extract2t &data = to_byte_extract2t(expr);

  assert(is_scalar_type(data.source_value) && "Byte extract now only works on "
                                              "scalar variables");

  if (!is_constant_int2t(data.source_offset)) {
    expr2tc source = data.source_value;
    unsigned int src_width = source->type->get_width();
    if (!is_bv_type(source)) {
      source = typecast2tc(get_uint_type(src_width), source);
    }

    // The approach: the argument is now a bitvector. Just shift it the
    // appropriate amount, according to the source offset, and select out the
    // bottom byte.
    expr2tc offs = data.source_offset;

    // Endian-ness: if we're in non-"native" endian-ness mode, then flip the
    // offset distance. The rest of these calculations will still apply.
    if (data.big_endian) {
      auto data_size = type_byte_size(*source->type);
      constant_int2tc data_size_expr(source->type, data_size - 1);
      sub2tc sub(source->type, data_size_expr, offs);
      offs = sub;
    }

    if (offs->type->get_width() != src_width)
      // Z3 requires these two arguments to be the same width.
      // FIX: cast the (possibly endian-flipped) offset 'offs', not the raw
      // data.source_offset -- the original code rebuilt the cast from
      // data.source_offset and thereby discarded the big-endian adjustment
      // computed just above.
      offs = typecast2tc(source->type, offs);

    lshr2tc shr(source->type, source, offs);
    smt_astt ext = convert_ast(shr);
    smt_astt res = mk_extract(ext, 7, 0, convert_sort(get_uint8_type()));
    return res;
  }

  // Constant offset: compute the bit bounds of the requested byte directly.
  const constant_int2t &intref = to_constant_int2t(data.source_offset);

  unsigned width;
  width = data.source_value->type->get_width();

  uint64_t upper, lower;
  if (!data.big_endian) {
    upper = ((intref.constant_value.to_long() + 1) * 8) - 1; //((i+1)*w)-1;
    lower = intref.constant_value.to_long() * 8; //i*w;
  } else {
    uint64_t max = width - 1;
    upper = max - (intref.constant_value.to_long() * 8); //max-(i*w);
    lower = max - ((intref.constant_value.to_long() + 1) * 8 - 1); //max-((i+1)*w-1);
  }

  smt_astt source = convert_ast(data.source_value);

  if (int_encoding) {
    std::cerr << "Refusing to byte extract in integer mode; re-run in "
                 "bitvector mode" << std::endl;
    abort();
  } else {
    if (is_bv_type(data.source_value)) {
      ;
    } else if (is_fixedbv_type(data.source_value)) {
      ;
    } else if (is_bool_type(data.source_value)) {
      // We can extract a byte from a bool -- zero or one.
      typecast2tc cast(get_uint8_type(), data.source_value);
      source = convert_ast(cast);
    } else {
      std::cerr << "Unrecognized type in operand to byte extract." << std::endl;
      data.dump();
      abort();
    }

    unsigned int sort_sz = data.source_value->type->get_width();
    if (sort_sz <= upper) {
      // Out-of-bounds extraction: return a free byte-wide symbol rather
      // than emitting an extract past the top of the source bitvector.
      smt_sortt s = mk_sort(SMT_SORT_BV, 8, false);
      return mk_smt_symbol("out_of_bounds_byte_extract", s);
    } else {
      return mk_extract(source, upper, lower, convert_sort(expr->type));
    }
  }
}