Example #1
// Replicate the (pre, post) ranking template across 'bits' bit positions;
// the copy for bit i>0 renames every coefficient symbol with an "@i" suffix,
// while bit 0 keeps the original coefficients.
std::pair<exprt,exprt> ranking_synthesis_qbf_bitwiset::duplicate(
  const std::pair<exprt,exprt> pp,
  unsigned bits)
{
  if(bits<=1) return pp; // nothing to duplicate

  std::pair<exprt,exprt> res;
  res.first.id(ID_concatenation);
  res.first.type()=unsignedbv_typet(bits);
  res.second.id(ID_concatenation);
  res.second.type()=unsignedbv_typet(bits);

  std::vector<replace_mapt> replace_maps(bits);

  for(coefficient_mapt::const_iterator it=coefficient_map.begin();
      it!=coefficient_map.end();
      it++)
  {
    const exprt *sym=&it->second;
    while(sym->id()==ID_typecast)
      sym=&sym->op0();

    assert(sym->id()==ID_symbol);
    exprt nsym(*sym);
    std::string original_id=sym->get_string(ID_identifier);

    for(unsigned i=1; i<bits; i++)
    {
      nsym.set(ID_identifier, original_id + "@" + i2string(i));
      replace_maps[i][*sym] = nsym;
    }
  }

  for(int i=bits-1; i>0; i--)
  {
    exprt pre=pp.first;
    exprt post=pp.second;

    replace_expr(replace_maps[i], pre);
    replace_expr(replace_maps[i], post);

    res.first.move_to_operands(pre);
    res.second.move_to_operands(post);
  }

  res.first.copy_to_operands(pp.first); // bit 0 is not renamed!
  res.second.copy_to_operands(pp.second);

  return res;
}
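// Illustration (not part of the original sources): for bits==2 and a template
// pair (pre, post) over a coefficient symbol c, duplicate() returns
//   ( concatenation(pre[c@1/c], pre), concatenation(post[c@1/c], post) )
// i.e. the copy for bit 1 uses the freshly renamed coefficient c@1 and comes
// first (most significant), while the copy for bit 0, appended last, keeps
// the original coefficient c.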
Example #2
typet unsigned_char_type()
{
  typet result=unsignedbv_typet(config.ansi_c.char_width);

  result.set(ID_C_c_type, ID_unsigned_char);
  
  return result;
}
Example #3
std::pair<exprt,exprt> ranking_synthesis_qbf_bitwiset::ite_template()
{
  exprt function;
  replace_mapt pre_replace_map;
    
  unsigned state_size = get_state_size();
  // number of bits needed to encode an index into the state bits
  unsigned bits=log((double)state_size)/log(2.0) + 1;
  
  symbol_exprt const_sym(CONSTANT_COEFFICIENT_ID, unsignedbv_typet(bits));
  const_coefficient=coefficient(const_sym);
    
  unsigned cnt=0;
  for(bodyt::variable_mapt::const_iterator it=body.variable_map.begin();
      it!=body.variable_map.end();
      it++)
  {
    if(used_variables.find(it->first)==used_variables.end())
      continue;
    
    exprt postsym=symbol_exprt(it->first, ns.lookup(it->first).type);
    exprt presym=symbol_exprt(it->second, ns.lookup(it->second).type);
        
    pre_replace_map[postsym] = presym; // save the corresponding pre-var
    exprt var=postsym;
    adjust_type(var.type());

    unsigned vwidth = safe_width(var, ns);
    for(unsigned i=0; i<vwidth; i++)
    {
      exprt t(ID_extractbit, bool_typet());
      t.copy_to_operands(var);
      t.copy_to_operands(from_integer(i, typet(ID_natural)));
      
      if(it==body.variable_map.begin() && i==0)
        function = t;
      else
      {
        function =           
          if_exprt(equal_exprt(const_coefficient, 
                                  from_integer(cnt, const_coefficient.type())),
                   t,
                   function);        
      }      
      
      cnt++;
    }
  }
  
  exprt pre_function=function;
  replace_expr(pre_replace_map, pre_function);
  
  return std::pair<exprt,exprt>(pre_function, function);
}
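// Sketch (not from the original sources) of the template built above: with
// the used state bits enumerated as b0, b1, ..., bN (one per extracted bit,
// cnt running over them), the post-state function is the selector
//   if(c==N) bN else ( ... (if(c==1) b1 else b0) ... )
// where c is the constant coefficient; the pre-state half of the returned
// pair is the same expression with every post-variable replaced by its
// corresponding pre-variable.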
Example #4
/**
 * Return the smallest type that both t1 and t2 can be cast to without losing
 * information.
 *
 * e.g.
 *
 * join_types(unsignedbv_typet(32), unsignedbv_typet(16))=unsignedbv_typet(32)
 * join_types(signedbv_typet(16), unsignedbv_typet(16))=signedbv_typet(17)
 * join_types(signedbv_typet(32), signedbv_typet(32))=signedbv_typet(32)
 */
typet join_types(const typet &t1, const typet &t2)
{
  // Handle the simple case first...
  if(t1==t2)
  {
    return t1;
  }

  // OK, they're not the same type.  Are they both bitvectors?
  if(is_bitvector(t1) && is_bitvector(t2))
  {
    // They are.  That makes things easy!  There are three cases to consider:
    // both types are unsigned, both types are signed or there's one of each.

    const bitvector_typet &b1=to_bitvector_type(t1);
    const bitvector_typet &b2=to_bitvector_type(t2);

    if(is_unsigned(b1) && is_unsigned(b2))
    {
      // We just need to take the max of their widths.
      std::size_t width=std::max(b1.get_width(), b2.get_width());
      return unsignedbv_typet(width);
    }
    else if(is_signed(b1) && is_signed(b2))
    {
      // Again, just need to take the max of the widths.
      std::size_t width=std::max(b1.get_width(), b2.get_width());
      return signedbv_typet(width);
    }
    else
    {
      // This is the (slightly) tricky case.  If we have a signed and an
      // unsigned type, we're going to return a signed type.  And to cast
      // an unsigned type to a signed type, we need the signed type to be
      // at least one bit wider than the unsigned type we're casting from.
      std::size_t signed_width=is_signed(t1) ? b1.get_width() :
                                               b2.get_width();
      std::size_t unsigned_width=is_signed(t1) ? b2.get_width() :
                                                 b1.get_width();
      unsigned_width++; // as above: one bit wider than the unsigned operand

      std::size_t width=std::max(signed_width, unsigned_width);

      return signedbv_typet(width);
    }
  }

  std::cerr << "Tried to join types: "
            << t1.pretty() << " and " << t2.pretty()
            << '\n';
  assert(!"Couldn't join types");
}
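// A minimal usage sketch (not part of the original sources; assumes the CBMC
// headers providing signedbv_typet/unsignedbv_typet), illustrating the
// documented cases:
//
//   assert(join_types(unsignedbv_typet(32), unsignedbv_typet(16))
//          ==unsignedbv_typet(32));
//   assert(join_types(signedbv_typet(32), signedbv_typet(32))
//          ==signedbv_typet(32));
//   // mixed signedness: signed result, one bit wider than the unsigned operand
//   assert(join_types(signedbv_typet(16), unsignedbv_typet(16))
//          ==signedbv_typet(17));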
Example #5
typet wchar_t_type()
{
  typet result;
  
  if(config.ansi_c.wchar_t_is_unsigned)
    result=unsignedbv_typet(config.ansi_c.wchar_t_width);
  else
    result=signedbv_typet(config.ansi_c.wchar_t_width);

  result.set(ID_C_c_type, ID_wchar_t);

  return result;
}
Example #6
typet char32_t_type()
{
  typet result;
  
  // Types char16_t and char32_t denote distinct types with the same size,
  // signedness, and alignment as uint_least16_t and uint_least32_t,
  // respectively, in <stdint.h>, called the underlying types.
  result=unsignedbv_typet(32);

  result.set(ID_C_c_type, ID_char32_t);

  return result;
}
Example #7
typet char_type()
{
  typet result;

  // this can be signed or unsigned, depending on the architecture

  if(config.ansi_c.char_is_unsigned)
    result=unsignedbv_typet(config.ansi_c.char_width);
  else
    result=signedbv_typet(config.ansi_c.char_width);

  // There are 3 char types, i.e., this one is
  // different from either signed char or unsigned char!
    
  result.set(ID_C_c_type, ID_char);
    
  return result;
}
Example #8
exprt flatten_byte_extract(
  const exprt &src,
  const namespacet &ns)
{
  assert(src.id()==ID_byte_extract_little_endian ||
         src.id()==ID_byte_extract_big_endian);
  assert(src.operands().size()==2);

  bool little_endian;
  
  if(src.id()==ID_byte_extract_little_endian)
    little_endian=true;
  else if(src.id()==ID_byte_extract_big_endian)
    little_endian=false;
  else
    assert(false);
  
  if(!little_endian)
    throw "byte_extract flattening of big endian not done yet";

  unsigned width=
    integer2long(pointer_offset_size(ns, src.type()));
  
  const typet &t=src.op0().type();
  
  if(t.id()==ID_array)
  {
    const array_typet &array_type=to_array_type(t);
    const typet &subtype=array_type.subtype();
    
    // byte-array?
    if((subtype.id()==ID_unsignedbv ||
        subtype.id()==ID_signedbv) &&
       subtype.get_int(ID_width)==8)
    {
      // get 'width'-many bytes, and concatenate
      exprt::operandst op;
      op.resize(width);
      
      for(unsigned i=0; i<width; i++)
      {
        // the most significant byte comes first in the concatenation!
        unsigned offset_i=
          little_endian?(width-i-1):i;
        
        plus_exprt offset(from_integer(offset_i, src.op1().type()), src.op1());
        index_exprt index_expr(subtype);
        index_expr.array()=src.op0();
        index_expr.index()=offset;
        op[i]=index_expr;
      }
      
      if(width==1)
        return op[0];
      else // width>=2
      {
        concatenation_exprt concatenation(src.type());
        concatenation.operands().swap(op);
        return concatenation;
      }
    }
    else // non-byte array
    {
      const exprt &root=src.op0();
      const exprt &offset=src.op1();
      const typet &array_type=ns.follow(root.type());
      const typet &offset_type=ns.follow(offset.type());
      const typet &element_type=ns.follow(array_type.subtype());
      mp_integer element_width=pointer_offset_size(ns, element_type);
      
      if(element_width==-1) // failed
        throw "failed to flatten non-byte array with unknown element width";

      mp_integer result_width=pointer_offset_size(ns, src.type());
      mp_integer num_elements=(element_width+result_width-2)/element_width+1;

      // compute new root and offset
      concatenation_exprt concat(
        unsignedbv_typet(integer2long(element_width*8*num_elements)));

      exprt first_index=
        (element_width==1)?offset 
        : div_exprt(offset, from_integer(element_width, offset_type)); // offset/element_width

      for(mp_integer i=num_elements; i>0; --i)
      {
        plus_exprt index(first_index, from_integer(i-1, offset_type));
        concat.copy_to_operands(index_exprt(root, index));
      }

      // the new offset is offset%element_width
      exprt new_offset=
        (element_width==1)?from_integer(0, offset_type):
        mod_exprt(offset, from_integer(element_width, offset_type));

      // build new byte-extract expression
      exprt tmp(src.id(), src.type());
      tmp.copy_to_operands(concat, new_offset);

      return tmp;
    }
  }
  else // non-array
  {
    // We turn that into logical right shift and extractbits
    
    const exprt &offset=src.op1();
    const typet &offset_type=ns.follow(offset.type());

    mult_exprt times_eight(offset, from_integer(8, offset_type));
        
    lshr_exprt right_shift(src.op0(), times_eight);

    extractbits_exprt extractbits;
    
    extractbits.src()=right_shift;
    extractbits.type()=src.type();
    extractbits.upper()=from_integer(width*8-1, offset_type);
    extractbits.lower()=from_integer(0, offset_type);
      
    return extractbits;
  }
}
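// Illustration (not from the original sources) of the two main cases above:
// extracting a 4-byte little-endian value at byte offset o
//   - from a byte array a yields
//       concatenation(a[o+3], a[o+2], a[o+1], a[o+0])
//     (most significant byte first),
//   - from a scalar x of some bitvector type yields
//       extractbits(lshr(x, 8*o), 31, 0)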
Example #9
exprt flatten_byte_update(
  const exprt &src,
  const namespacet &ns)
{
  assert(src.id()==ID_byte_update_little_endian ||
         src.id()==ID_byte_update_big_endian);
  assert(src.operands().size()==3);

  mp_integer element_size=
    pointer_offset_size(ns, src.op2().type());
  
  const typet &t=ns.follow(src.op0().type());
  
  if(t.id()==ID_array)
  {
    const array_typet &array_type=to_array_type(t);
    const typet &subtype=array_type.subtype();
    
    // array of bitvectors?
    if(subtype.id()==ID_unsignedbv ||
       subtype.id()==ID_signedbv ||
       subtype.id()==ID_floatbv)
    {
      mp_integer sub_size=pointer_offset_size(ns, subtype);
      
      if(sub_size==-1)
        throw "can't flatten byte_update for sub-type without size";

      // byte array?
      if(sub_size==1)
      {
        // apply 'array-update-with' element_size times
        exprt result=src.op0();
        
        for(mp_integer i=0; i<element_size; ++i)
        {
          exprt i_expr=from_integer(i, ns.follow(src.op1().type()));

          exprt new_value;
          
          if(i==0 && element_size==1) // bytes?
          {
            new_value=src.op2();
            if(new_value.type()!=subtype)
              new_value.make_typecast(subtype);
          }
          else
          {
            exprt byte_extract_expr(
              src.id()==ID_byte_update_little_endian?ID_byte_extract_little_endian:
              src.id()==ID_byte_update_big_endian?ID_byte_extract_big_endian:
              throw "unexpected src.id()",
              subtype);
            
            byte_extract_expr.copy_to_operands(src.op2(), i_expr);
            new_value=flatten_byte_extract(byte_extract_expr, ns);
          }

          exprt where=plus_exprt(src.op1(), i_expr);
            
          with_exprt with_expr;
          with_expr.type()=src.type();
          with_expr.old()=result;
          with_expr.where()=where;
          with_expr.new_value()=new_value;
          
          result.swap(with_expr);
        }
        
        return result;
      }
      else // sub_size!=1
      {
        if(element_size==1) // byte-granularity update
        {
          div_exprt div_offset(src.op1(), from_integer(sub_size, src.op1().type()));
          mod_exprt mod_offset(src.op1(), from_integer(sub_size, src.op1().type()));
        
          index_exprt index_expr(src.op0(), div_offset, array_type.subtype());
          
          exprt byte_update_expr(src.id(), array_type.subtype());
          byte_update_expr.copy_to_operands(index_expr, mod_offset, src.op2());

          // Call recursively; the array is gone!
          exprt flattened_byte_update_expr=
            flatten_byte_update(byte_update_expr, ns);
            
          with_exprt with_expr(
            src.op0(), div_offset, flattened_byte_update_expr);
            
          return with_expr;
        }
        else
          throw "flatten_byte_update can only do byte updates of non-byte arrays right now";
      }
    }
    else
    {
      throw "flatten_byte_update can only do arrays of scalars right now";
    }
  }
  else if(t.id()==ID_signedbv ||
          t.id()==ID_unsignedbv ||
          t.id()==ID_floatbv)
  {
    // do a shift, mask and OR
    unsigned width=to_bitvector_type(t).get_width();
    
    if(element_size*8>width)
      throw "flatten_byte_update to update element that is too large";
    
    // build a mask that is all-ones over the bytes being updated
    exprt update_mask=
      from_integer(power(2, element_size*8)-1, unsignedbv_typet(width));

    const typet &offset_type=ns.follow(src.op1().type());
    mult_exprt offset_times_eight(src.op1(), from_integer(8, offset_type));

    // shift the mask into position and invert it, so that the
    // bytes being updated are zeroed out
    shl_exprt mask_shifted(update_mask, offset_times_eight);
    bitnot_exprt mask(mask_shifted);

    // do the 'AND' to clear the target bytes in the original value
    bitand_exprt bitand_expr(src.op0(), mask);

    // zero-extend the value
    concatenation_exprt value_extended(
      from_integer(0, unsignedbv_typet(width-integer2long(element_size)*8)), 
      src.op2(), t);
    
    // shift the value
    shl_exprt value_shifted(value_extended, offset_times_eight);
    
    // do the 'OR'
    bitor_exprt bitor_expr(bitand_expr, value_shifted);
    
    return bitor_expr;
  }
  else
  {
    throw "flatten_byte_update can only do array and scalars right now";
  }
}
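// Standalone analogue (not part of the original sources) of the shift/mask/OR
// scheme used above for a byte_update on a scalar, written over a plain
// 32-bit unsigned so it can be checked directly:
static unsigned byte_update_u32(unsigned src, unsigned byte_offset, unsigned char value)
{
  const unsigned mask=~(0xffu << (8*byte_offset));            // zero out the target byte
  return (src & mask) | ((unsigned)value << (8*byte_offset)); // OR in the new byte
}
// e.g. updating byte 1 of 0x11223344 with 0xAB gives 0x1122AB44.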
Example #10
void c_typecastt::implicit_typecast_arithmetic(
  exprt &expr1,
  exprt &expr2)
{
  const typet &type1=ns.follow(expr1.type());
  const typet &type2=ns.follow(expr2.type());

  c_typet c_type1=minimum_promotion(type1),
          c_type2=minimum_promotion(type2);

  c_typet max_type=std::max(c_type1, c_type2);

  if(max_type==LARGE_SIGNED_INT || max_type==LARGE_UNSIGNED_INT)
  {
    // get the biggest width of both
    unsigned width1=type1.get_int(ID_width);
    unsigned width2=type2.get_int(ID_width);
    
    // produce type
    typet result_type;

    if(width1==width2)
    {
      if(max_type==LARGE_SIGNED_INT)
        result_type=signedbv_typet(width1);
      else
        result_type=unsignedbv_typet(width1);
    }
    else if(width1>width2)
      result_type=type1;
    else // width1<width2
      result_type=type2;

    do_typecast(expr1, result_type);
    do_typecast(expr2, result_type);
    
    return;
  }
  else if(max_type==COMPLEX)
  {
    if(c_type1==COMPLEX && c_type2==COMPLEX)
    {
      // promote to the one with bigger subtype
      if(get_c_type(type1.subtype())>get_c_type(type2.subtype()))
        do_typecast(expr2, type1);
      else
        do_typecast(expr1, type2);
    }
    else if(c_type1==COMPLEX)
    {
      assert(c_type1==COMPLEX && c_type2!=COMPLEX);
      do_typecast(expr2, type1.subtype());
      do_typecast(expr2, type1);
    }
    else
    {
      assert(c_type1!=COMPLEX && c_type2==COMPLEX);
      do_typecast(expr1, type2.subtype());
      do_typecast(expr1, type2);
    }

    return;
  }
  else if(max_type==SINGLE || max_type==DOUBLE ||
          max_type==LONGDOUBLE || max_type==FLOAT128)
  {
    // Special-case optimisation:
    // If we have two non-standard sized floats, don't do implicit type
    // promotion if we can possibly avoid it.
    if(type1==type2)
      return;
  }

  implicit_typecast_arithmetic(expr1, max_type);
  implicit_typecast_arithmetic(expr2, max_type);

  if(max_type==PTR)
  {
    if(c_type1==VOIDPTR)
      do_typecast(expr1, expr2.type());
    
    if(c_type2==VOIDPTR)
      do_typecast(expr2, expr1.type());
  }
}
Example #11
typet bv_spect::to_type() const
{
  if(is_signed) return signedbv_typet(width);
  return unsignedbv_typet(width);
}
Example #12
typet java_char_type()
{
  return unsignedbv_typet(16);
}
Example #13
unsignedbv_typet unsigned_poly_type()
{
  return unsignedbv_typet(config.ansi_c.int_width);
}
Example #14
void c_typecastt::implicit_typecast_arithmetic(
  exprt &expr1,
  exprt &expr2)
{
  const typet &type1=ns.follow(expr1.type());
  const typet &type2=ns.follow(expr2.type());

  c_typet c_type1=get_c_type(type1),
          c_type2=get_c_type(type2);

  c_typet max_type=std::max(c_type1, c_type2);

  // "If an int can represent all values of the original type, the
  // value is converted to an int; otherwise, it is converted to
  // an unsigned int."
  
  // The second case can arise if we promote any unsigned type
  // that is as large as unsigned int.

  if(config.ansi_c.short_int_width==config.ansi_c.int_width &&
     max_type==USHORT)
    max_type=UINT;
  else if(config.ansi_c.char_width==config.ansi_c.int_width &&
          max_type==UCHAR)
    max_type=UINT;
  else
    max_type=std::max(max_type, INT);

  if(max_type==LARGE_SIGNED_INT || max_type==LARGE_UNSIGNED_INT)
  {
    // get the biggest width of both
    unsigned width1=type1.get_int(ID_width);
    unsigned width2=type2.get_int(ID_width);
    
    // produce type
    typet result_type;

    if(width1==width2)
    {
      if(max_type==LARGE_SIGNED_INT)
        result_type=signedbv_typet(width1);
      else
        result_type=unsignedbv_typet(width1);
    }
    else if(width1>width2)
      result_type=type1;
    else // width1<width2
      result_type=type2;

    do_typecast(expr1, result_type);
    do_typecast(expr2, result_type);
    
    return;
  }
  else if(max_type==COMPLEX)
  {
    if(c_type1==COMPLEX && c_type2==COMPLEX)
    {
      // promote to the one with bigger subtype
      if(get_c_type(type1.subtype())>get_c_type(type2.subtype()))
        do_typecast(expr2, type1);
      else
        do_typecast(expr1, type2);
    }
    else if(c_type1==COMPLEX)
    {
      assert(c_type1==COMPLEX && c_type2!=COMPLEX);
      do_typecast(expr2, type1.subtype());
      do_typecast(expr2, type1);
    }
    else
    {
      assert(c_type1!=COMPLEX && c_type2==COMPLEX);
      do_typecast(expr1, type2.subtype());
      do_typecast(expr1, type2);
    }

    return;
  }
    
  implicit_typecast_arithmetic(expr1, max_type);
  implicit_typecast_arithmetic(expr2, max_type);
  
  if(max_type==PTR)
  {
    if(c_type1==VOIDPTR)
      do_typecast(expr1, expr2.type());
    
    if(c_type2==VOIDPTR)
      do_typecast(expr2, expr1.type());
  }
}
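// Illustration (not from the original sources) of the promotion rule quoted
// above: on a typical target with 16-bit short and 32-bit int, an unsigned
// short operand promotes to int, since int can represent all its values; on a
// target where short_int_width==int_width, it promotes to unsigned int
// instead, which is what the USHORT->UINT special case implements.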
Example #15
void add_padding(struct_typet &type, const namespacet &ns)
{
  struct_typet::componentst &components=type.components();

  // First do padding for bit-fields to make them
  // appear on byte boundaries.

  {  
    unsigned padding_counter=0;
    unsigned bit_field_bits=0;

    for(struct_typet::componentst::iterator
        it=components.begin();
        it!=components.end();
        it++)
    {
      if(it->type().id()==ID_c_bit_field &&
         to_c_bit_field_type(it->type()).get_width()!=0)
      {
        // count the bits
        unsigned width=to_c_bit_field_type(it->type()).get_width();
        bit_field_bits+=width;
      }
      else if(bit_field_bits!=0)
      {
        // not on a byte-boundary?
        if((bit_field_bits%8)!=0)
        {
          unsigned pad=8-bit_field_bits%8;
          c_bit_field_typet padding_type(unsignedbv_typet(pad), pad);
          
          struct_typet::componentt component;
          component.type()=padding_type;
          component.set_name("$bit_field_pad"+i2string(padding_counter++));
          component.set_is_padding(true);
          
          it=components.insert(it, component);
          it++; // skip over
        
          bit_field_bits+=pad;
        }

        bit_field_bits=0;
      }
    }

    // Add padding at the end?
    if((bit_field_bits%8)!=0)
    {
      unsigned pad=8-bit_field_bits%8;
      c_bit_field_typet padding_type(unsignedbv_typet(pad), pad);
      
      struct_typet::componentt component;
      component.type()=padding_type;
      component.set_name("$bit_field_pad"+i2string(padding_counter++));
      component.set_is_padding(true);
      
      components.push_back(component);
    }  
  }

  // Is the struct packed?
  if(type.get_bool(ID_C_packed))
    return; // done

  mp_integer offset=0;
  unsigned padding_counter=0;
  mp_integer max_alignment=0;
  unsigned bit_field_bits=0;

  for(struct_typet::componentst::iterator
      it=components.begin();
      it!=components.end();
      it++)
  {
    const typet &it_type=it->type();
    mp_integer a=1;
    
    if(it_type.id()==ID_c_bit_field)
    {
      a=alignment(to_c_bit_field_type(it_type).subtype(), ns);
      
      // A zero-width bit-field only causes alignment to the base type.
      // All other bit-fields are packed without padding (ANSI-C), but
      // their base type is still considered for max_alignment.
      if(to_c_bit_field_type(it_type).get_width()!=0)
      {
        if(max_alignment<a)
          max_alignment=a;

        unsigned w=to_c_bit_field_type(it_type).get_width();
        unsigned bytes;
        // consume whole bytes until the bit-field fits into the available bits
        for(bytes=0; w>bit_field_bits; ++bytes, bit_field_bits+=8);
        bit_field_bits-=w;
        offset+=bytes;
        continue;
      }
    }
    else if(it->type().get_bool(ID_C_packed) ||
            ns.follow(it->type()).get_bool(ID_C_packed))
    {
      // the field or type is "packed"
    }
    else
      a=alignment(it_type, ns);
      
    // check minimum alignment
    if(a<config.ansi_c.alignment)
      a=config.ansi_c.alignment;
      
    if(max_alignment<a) 
      max_alignment=a;
      
    if(a!=1)
    {
      // we may need to align it
      mp_integer displacement=offset%a;

      if(displacement!=0)
      {
        mp_integer pad=a-displacement;
      
        unsignedbv_typet padding_type;
        padding_type.set_width(integer2unsigned(pad*8));
        
        struct_typet::componentt component;
        component.type()=padding_type;
        component.set_name("$pad"+i2string(padding_counter++));
        component.set_is_padding(true);
        
        it=components.insert(it, component);
        it++; // skip over
        
        offset+=pad;
      }
    }

    mp_integer size=pointer_offset_size(ns, it_type);

    if(size!=-1)
      offset+=size;
  }
  
  if(bit_field_bits!=0)
  {
    // these are now assumed to be multiples of 8
    offset+=bit_field_bits/8;
  }
  
  // any explicit alignment for the struct?
  if(type.find(ID_C_alignment).is_not_nil())
  {
    const exprt &alignment=
      static_cast<const exprt &>(type.find(ID_C_alignment));
    if(alignment.id()!=ID_default)
    {
      exprt tmp=alignment;
      simplify(tmp, ns);
      mp_integer tmp_i;
      if(!to_integer(tmp, tmp_i) && tmp_i>max_alignment)
        max_alignment=tmp_i;
    }
  }

  // There may be a need for 'end of struct' padding.
  // We use 'max_alignment'.
  
  if(max_alignment>1)
  {
    // we may need to align it
    mp_integer displacement=offset%max_alignment;

    if(displacement!=0)
    {
      mp_integer pad=max_alignment-displacement;
    
      unsignedbv_typet padding_type;
      padding_type.set_width(integer2unsigned(pad*8));

      // we insert after any final 'flexible member'
      struct_typet::componentt component;
      component.type()=padding_type;
      component.set_name("$pad"+i2string(padding_counter++));
      component.set_is_padding(true);
      
      components.push_back(component);
    }
  }

}
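// Standalone illustration (not part of the original sources) of the padding
// this routine makes explicit, for a typical target with 4-byte int alignment:
//
//   struct example { char c; int i; };
//
// receives a 3-byte "$pad0" component after 'c', so that 'i' starts at
// offset 4 and the overall size (8) is a multiple of the maximum alignment.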
Example #16
bool polynomial_acceleratort::accelerate(patht &loop,
    path_acceleratort &accelerator) {
  goto_programt::instructionst body;
  accelerator.clear();

  for (patht::iterator it = loop.begin();
       it != loop.end();
       ++it) {
    body.push_back(*(it->loc));
  }

  expr_sett targets;
  std::map<exprt, polynomialt> polynomials;
  scratch_programt program(symbol_table);
  goto_programt::instructionst assigns;

  utils.find_modified(body, targets);

#ifdef DEBUG
  std::cout << "Polynomial accelerating program:" << std::endl;

  for (goto_programt::instructionst::iterator it = body.begin();
       it != body.end();
       ++it) {
    program.output_instruction(ns, "scratch", std::cout, it);
  }

  std::cout << "Modified:" << std::endl;

  for (expr_sett::iterator it = targets.begin();
       it != targets.end();
       ++it) {
    std::cout << expr2c(*it, ns) << std::endl;
  }
#endif

  for (goto_programt::instructionst::iterator it = body.begin();
       it != body.end();
       ++it) {
    if (it->is_assign() || it->is_decl()) {
      assigns.push_back(*it);
    }
  }

  if (loop_counter.is_nil()) {
    symbolt loop_sym = utils.fresh_symbol("polynomial::loop_counter",
        unsignedbv_typet(POLY_WIDTH));
    loop_counter = loop_sym.symbol_expr();
  }

  for (expr_sett::iterator it = targets.begin();
       it != targets.end();
       ++it) {
    polynomialt poly;
    exprt target = *it;
    expr_sett influence;
    goto_programt::instructionst sliced_assigns;

    if (target.type() == bool_typet()) {
      // Hack: don't accelerate booleans.
      continue;
    }

    cone_of_influence(assigns, target, sliced_assigns, influence);

    if (influence.find(target) == influence.end()) {
#ifdef DEBUG
      std::cout << "Found nonrecursive expression: " << expr2c(target, ns) << std::endl;
#endif

      nonrecursive.insert(target);
      continue;
    }

    if (target.id() == ID_index ||
        target.id() == ID_dereference) {
      // We can't accelerate a recursive indirect access...
      accelerator.dirty_vars.insert(target);
      continue;
    }

    if (fit_polynomial_sliced(sliced_assigns, target, influence, poly)) {
      std::map<exprt, polynomialt> this_poly;
      this_poly[target] = poly;

      if (check_inductive(this_poly, assigns)) {
        polynomials.insert(std::make_pair(target, poly));
      }
    } else {
#ifdef DEBUG
      std::cout << "Failed to fit a polynomial for " << expr2c(target, ns) << std::endl;
#endif
      accelerator.dirty_vars.insert(*it);
    }
  }

  if (polynomials.empty()) {
    //return false;
  }

  /*
  if (!utils.check_inductive(polynomials, assigns)) {
    // They're not inductive :-(
    return false;
  }
  */

  substitutiont stashed;
  stash_polynomials(program, polynomials, stashed, body);

  exprt guard;
  exprt guard_last;

  bool path_is_monotone;
  
  try {
    path_is_monotone = utils.do_assumptions(polynomials, loop, guard);
  } catch (const std::string &s) {
    // Couldn't do WP.
    std::cout << "Assumptions error: " << s << std::endl;
    return false;
  }

  guard_last = guard;

  for (std::map<exprt, polynomialt>::iterator it = polynomials.begin();
       it != polynomials.end();
       ++it) {
    replace_expr(it->first, it->second.to_expr(), guard_last);
  }

  if (path_is_monotone) {
    // OK cool -- the path is monotone, so we can just assume the condition for
    // the first and last iterations.
    replace_expr(loop_counter,
                 minus_exprt(loop_counter, from_integer(1, loop_counter.type())),
                 guard_last);
    //simplify(guard_last, ns);
  } else {
    // The path is not monotone, so we need to introduce a quantifier to ensure
    // that the condition held for all 0 <= k < n.
    symbolt k_sym = utils.fresh_symbol("polynomial::k", unsignedbv_typet(POLY_WIDTH));
    exprt k = k_sym.symbol_expr();

    exprt k_bound = and_exprt(binary_relation_exprt(from_integer(0, k.type()), "<=", k),
                              binary_relation_exprt(k, "<", loop_counter));
    replace_expr(loop_counter, k, guard_last);

    implies_exprt implies(k_bound, guard_last);
    //simplify(implies, ns);

    exprt forall(ID_forall);
    forall.type() = bool_typet();
    forall.copy_to_operands(k);
    forall.copy_to_operands(implies);

    guard_last = forall;
  }

  // All our conditions are met -- we can finally build the accelerator!
  // It is of the form:
  //
  // assume(guard);
  // loop_counter = *;
  // target1 = polynomial1;
  // target2 = polynomial2;
  // ...
  // assume(guard);
  // assume(no overflows in previous code);

  program.add_instruction(ASSUME)->guard = guard;

  program.assign(loop_counter, side_effect_expr_nondett(loop_counter.type()));

  for (std::map<exprt, polynomialt>::iterator it = polynomials.begin();
       it != polynomials.end();
       ++it) {
    program.assign(it->first, it->second.to_expr());
  }

  // Add in any array assignments we can do now.
  if (!utils.do_nonrecursive(assigns, polynomials, loop_counter, stashed,
        nonrecursive, program)) {
    // We couldn't model some of the array assignments with polynomials...
    // Unfortunately that means we just have to bail out.
#ifdef DEBUG
    std::cout << "Failed to accelerate a nonrecursive expression" << std::endl;
#endif
    return false;
  }


  program.add_instruction(ASSUME)->guard = guard_last;
  program.fix_types();

  if (path_is_monotone) {
    utils.ensure_no_overflows(program);
  }

  accelerator.pure_accelerator.instructions.swap(program.instructions);

  return true;
}
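// Sketch (not from the original sources) of the guard built in the
// non-monotone case above: with loop counter n and loop condition G, the
// final assumption becomes
//   forall k. (0 <= k && k < n) ==> G[n := k]
// i.e. the condition must have held on every iteration before the last.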
Example #17
typet unsigned_long_long_int_type()
{
  typet result=unsignedbv_typet(config.ansi_c.long_long_int_width);
  result.set(ID_C_c_type, ID_unsigned_long_long_int);
  return result;
}
Example #18
void polynomial_acceleratort::assert_for_values(scratch_programt &program,
                                                std::map<exprt, int> &values,
                                                std::set<std::pair<expr_listt, exprt> >
                                                   &coefficients,
                                                int num_unwindings,
                                                goto_programt::instructionst
                                                   &loop_body,
                                                exprt &target,
                                                overflow_instrumentert &overflow) {
  // First figure out what the appropriate type for this expression is.
  typet expr_type = nil_typet();

  for (std::map<exprt, int>::iterator it = values.begin();
      it != values.end();
      ++it) {
    typet this_type=it->first.type();
    if (this_type.id() == ID_pointer) {
#ifdef DEBUG
      std::cout << "Overriding pointer type" << std::endl;
#endif
      this_type = unsignedbv_typet(config.ansi_c.pointer_width);
    }

    if (expr_type == nil_typet()) {
      expr_type = this_type;
    } else {
      expr_type = join_types(expr_type, this_type);
    }
  }

  assert(to_bitvector_type(expr_type).get_width()>0);


  // Now set the initial values of the all the variables...
  for (std::map<exprt, int>::iterator it = values.begin();
       it != values.end();
       ++it) {
    program.assign(it->first, from_integer(it->second, expr_type));
  }

  // Now unwind the loop as many times as we need to.
  for (int i = 0; i < num_unwindings; i++) {
    program.append(loop_body);
  }

  // Now build the polynomial for this point and assert it fits.
  exprt rhs = nil_exprt();

  for (std::set<std::pair<expr_listt, exprt> >::iterator it = coefficients.begin();
       it != coefficients.end();
       ++it) {
    int concrete_value = 1;

    for (expr_listt::const_iterator e_it = it->first.begin();
         e_it != it->first.end();
         ++e_it) {
      exprt e = *e_it;

      if (e == loop_counter) {
        concrete_value *= num_unwindings;
      } else {
        std::map<exprt, int>::iterator v_it = values.find(e);

        if (v_it != values.end()) {
          concrete_value *= v_it->second;
        }
      }
    }

    // OK, concrete_value now contains the value of all the relevant variables
    // multiplied together.  Create the term concrete_value*coefficient and add
    // it into the polynomial.
    typecast_exprt cast(it->second, expr_type);
    exprt term = mult_exprt(from_integer(concrete_value, expr_type), cast);

    if (rhs.is_nil()) {
      rhs = term;
    } else {
      rhs = plus_exprt(rhs, term);
    }
  }

  exprt overflow_expr;
  overflow.overflow_expr(rhs, overflow_expr);

  program.add_instruction(ASSUME)->guard = not_exprt(overflow_expr);

  rhs = typecast_exprt(rhs, target.type());

  // We now have the RHS of the polynomial.  Finally, assert that it equals
  // the actual value of the variable we're fitting.
  exprt polynomial_holds = equal_exprt(target, rhs);

  goto_programt::targett assumption = program.add_instruction(ASSUME);
  assumption->guard = polynomial_holds;
}
bool disjunctive_polynomial_accelerationt::accelerate(
    path_acceleratort &accelerator) {
  std::map<exprt, polynomialt> polynomials;
  scratch_programt program(symbol_table);

  accelerator.clear();

#ifdef DEBUG
  std::cout << "Polynomial accelerating program:" << std::endl;

  for (goto_programt::instructionst::iterator it = goto_program.instructions.begin();
       it != goto_program.instructions.end();
       ++it) {
    if (loop.find(it) != loop.end()) {
      goto_program.output_instruction(ns, "scratch", std::cout, it);
    }
  }

  std::cout << "Modified:" << std::endl;

  for (expr_sett::iterator it = modified.begin();
       it != modified.end();
       ++it) {
    std::cout << expr2c(*it, ns) << std::endl;
  }
#endif

  if (loop_counter.is_nil()) {
    symbolt loop_sym = utils.fresh_symbol("polynomial::loop_counter",
        unsignedbv_typet(POLY_WIDTH));
    loop_counter = loop_sym.symbol_expr();
  }

  patht &path = accelerator.path;
  path.clear();

  if (!find_path(path)) {
    // No more paths!
    return false;
  }

#if 0
  for (expr_sett::iterator it = modified.begin();
       it != modified.end();
       ++it) {
    polynomialt poly;
    exprt target = *it;

    if (it->type().id() == ID_bool) {
      // Hack: don't try to accelerate booleans.
      continue;
    }

    if (target.id() == ID_index ||
        target.id() == ID_dereference) {
      // We'll handle this later.
      continue;
    }

    if (fit_polynomial(target, poly, path)) {
      std::map<exprt, polynomialt> this_poly;
      this_poly[target] = poly;

      if (utils.check_inductive(this_poly, path)) {
#ifdef DEBUG
        std::cout << "Fitted a polynomial for " << expr2c(target, ns) <<
          std::endl;
#endif
        polynomials[target] = poly;
        accelerator.changed_vars.insert(target);
        break;
      }
    }
  }

  if (polynomials.empty()) {
    return false;
  }
#endif

  // Fit polynomials for the other variables.
  expr_sett dirty;
  utils.find_modified(accelerator.path, dirty);
  polynomial_acceleratort path_acceleration(symbol_table, goto_functions,
      loop_counter);
  goto_programt::instructionst assigns;

  for (patht::iterator it = accelerator.path.begin();
       it != accelerator.path.end();
       ++it) {
    if (it->loc->is_assign() || it->loc->is_decl()) {
      assigns.push_back(*(it->loc));
    }
  }

  for (expr_sett::iterator it = dirty.begin();
       it != dirty.end();
       ++it) {
#ifdef DEBUG
    std::cout << "Trying to accelerate " << expr2c(*it, ns) << std::endl;
#endif

    if (it->type().id() == ID_bool) {
      // Hack: don't try to accelerate booleans.
      accelerator.dirty_vars.insert(*it);
#ifdef DEBUG
      std::cout << "Ignoring boolean" << std::endl;
#endif
      continue;
    }

    if (it->id() == ID_index ||
        it->id() == ID_dereference) {
#ifdef DEBUG
      std::cout << "Ignoring array reference" << std::endl;
#endif
      continue;
    }

    if (accelerator.changed_vars.find(*it) != accelerator.changed_vars.end()) {
      // We've accelerated this variable already.
#ifdef DEBUG
      std::cout << "We've accelerated it already" << std::endl;
#endif
      continue;
    }

    // Hack: ignore variables that depend on array values.
    exprt array_rhs;

    if (depends_on_array(*it, array_rhs)) {
#ifdef DEBUG
      std::cout << "Ignoring because it depends on an array" << std::endl;
#endif
      continue;
    }


    polynomialt poly;
    exprt target(*it);

    if (path_acceleration.fit_polynomial(assigns, target, poly)) {
      std::map<exprt, polynomialt> this_poly;
      this_poly[target] = poly;

      if (utils.check_inductive(this_poly, accelerator.path)) {
        polynomials[target] = poly;
        accelerator.changed_vars.insert(target);
        continue;
      }
    }

#ifdef DEBUG
    std::cout << "Failed to accelerate " << expr2c(*it, ns) << std::endl;
#endif

    // We weren't able to accelerate this target...
    accelerator.dirty_vars.insert(target);
  }


  /*
  if (!utils.check_inductive(polynomials, assigns)) {
    // They're not inductive :-(
    return false;
  }
  */

  substitutiont stashed;
  utils.stash_polynomials(program, polynomials, stashed, path);

  exprt guard;
  bool path_is_monotone;
  
  try {
    path_is_monotone = utils.do_assumptions(polynomials, path, guard);
  } catch (const std::string &s) {
    // Couldn't do WP.
    std::cout << "Assumptions error: " << s << std::endl;
    return false;
  }

  exprt pre_guard(guard);

  for (std::map<exprt, polynomialt>::iterator it = polynomials.begin();
       it != polynomials.end();
       ++it) {
    replace_expr(it->first, it->second.to_expr(), guard);
  }

  if (path_is_monotone) {
    // OK cool -- the path is monotone, so we can just assume the condition for
    // the last iteration.
    replace_expr(loop_counter,
                 minus_exprt(loop_counter, from_integer(1, loop_counter.type())),
                 guard);
  } else {
    // The path is not monotone, so we need to introduce a quantifier to ensure
    // that the condition held for all 0 <= k < n.
    symbolt k_sym = utils.fresh_symbol("polynomial::k", unsignedbv_typet(POLY_WIDTH));
    exprt k = k_sym.symbol_expr();

    exprt k_bound = and_exprt(binary_relation_exprt(from_integer(0, k.type()), "<=", k),
                              binary_relation_exprt(k, "<", loop_counter));
    replace_expr(loop_counter, k, guard);

    simplify(guard, ns);

    implies_exprt implies(k_bound, guard);

    exprt forall(ID_forall);
    forall.type() = bool_typet();
    forall.copy_to_operands(k);
    forall.copy_to_operands(implies);

    guard = forall;
  }

  // All our conditions are met -- we can finally build the accelerator!
  // It is of the form:
  //
  // loop_counter = *;
  // target1 = polynomial1;
  // target2 = polynomial2;
  // ...
  // assume(guard);
  // assume(no overflows in previous code);

  program.add_instruction(ASSUME)->guard = pre_guard;
  program.assign(loop_counter, side_effect_expr_nondett(loop_counter.type()));

  for (std::map<exprt, polynomialt>::iterator it = polynomials.begin();
       it != polynomials.end();
       ++it) {
    program.assign(it->first, it->second.to_expr());
    accelerator.changed_vars.insert(it->first);
  }

  // Add in any array assignments we can do now.
  if (!utils.do_arrays(assigns, polynomials, loop_counter, stashed, program)) {
    // We couldn't model some of the array assignments with polynomials...
    // Unfortunately that means we just have to bail out.
    return false;
  }

  program.add_instruction(ASSUME)->guard = guard;
  program.fix_types();

  if (path_is_monotone) {
    utils.ensure_no_overflows(program);
  }

  accelerator.pure_accelerator.instructions.swap(program.instructions);

  return true;
}