static uint64_t _pipe_stream_read( stream_t* stream, void* dest, uint64_t num )
{
	stream_pipe_t* pipestream = (stream_pipe_t*)stream;
	FOUNDATION_ASSERT( stream->type == STREAMTYPE_PIPE );
#if FOUNDATION_PLATFORM_WINDOWS
	if( pipestream->handle_read && ( ( pipestream->mode & STREAM_IN ) != 0 ) )
	{
		uint64_t total_read = 0;
		do
		{
			unsigned long num_read = 0;
			if( !ReadFile( pipestream->handle_read, pointer_offset( dest, total_read ),
			               (unsigned int)( num - total_read ), &num_read, 0 ) )
				break;
			total_read += num_read;
		} while( total_read < num );
		return total_read;
	}
#elif FOUNDATION_PLATFORM_POSIX
	if( pipestream->fd_read && ( ( pipestream->mode & STREAM_IN ) != 0 ) )
	{
		uint64_t total_read = 0;
		do
		{
			ssize_t num_read = read( pipestream->fd_read, pointer_offset( dest, total_read ),
			                         (size_t)( num - total_read ) );
			if( num_read < 0 )
				break;
			total_read += num_read;
		} while( total_read < num );
		return total_read;
	}
#endif
	return 0;
}
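/*
 * All of the foundation-style snippets in this collection lean on a pointer_offset()
 * helper for byte-wise arithmetic on void pointers. A minimal sketch of such a helper,
 * assuming foundation_lib-style semantics (offset measured in bytes, negative offsets
 * allowed); the real library's signature and qualifiers may differ.
 */
#include <stddef.h>

static void* pointer_offset(void* ptr, ptrdiff_t offset) {
	/* Cast through char* so the offset is applied in bytes */
	return (void*)((char*)ptr + offset);
}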
void network_poll_initialize(network_poll_t* pollobj, unsigned int num_sockets) {
	pollobj->num_sockets = 0;
	pollobj->max_sockets = num_sockets;
#if FOUNDATION_PLATFORM_APPLE
	pollobj->pollfds = pointer_offset(pollobj->slots, sizeof(network_poll_slot_t) * num_sockets);
#elif FOUNDATION_PLATFORM_LINUX || FOUNDATION_PLATFORM_ANDROID
	pollobj->events = pointer_offset(pollobj->slots, sizeof(network_poll_slot_t) * num_sockets);
	pollobj->fd_poll = epoll_create((int)num_sockets);
#endif
}
static void* _memory_guard_initialize(void* memory, size_t size) {
	int guard_loop;
	uint32_t* guard_header = pointer_offset(memory, FOUNDATION_MAX_ALIGN);
	uint32_t* guard_footer = pointer_offset(memory, size + FOUNDATION_MAX_ALIGN * 2);
	*(size_t*)memory = size;
	for (guard_loop = 0; guard_loop < FOUNDATION_MAX_ALIGN / 4; ++guard_loop) {
		*guard_header++ = MEMORY_GUARD_VALUE;
		*guard_footer++ = MEMORY_GUARD_VALUE;
	}
	return pointer_offset(memory, FOUNDATION_MAX_ALIGN * 2);
}
stream_t* socket_stream_allocate(socket_t* sock, size_t buffer_in, size_t buffer_out) {
	size_t size = sizeof(socket_stream_t) + buffer_in + buffer_out;
	socket_stream_t* sockstream = memory_allocate(HASH_NETWORK, size, 0,
	                                              MEMORY_PERSISTENT | MEMORY_ZERO_INITIALIZED);
	sockstream->buffer_in = pointer_offset(sockstream, sizeof(socket_stream_t));
	sockstream->buffer_out = pointer_offset(sockstream->buffer_in, buffer_in);
	sockstream->buffer_in_size = buffer_in;
	sockstream->buffer_out_size = buffer_out;
	socket_stream_initialize(sockstream, sock);
	return (stream_t*)sockstream;
}
static uint64_t _pipe_stream_read( stream_t* stream, void* dest, uint64_t num )
{
	stream_pipe_t* pipestream = (stream_pipe_t*)stream;
	FOUNDATION_ASSERT( stream->type == STREAMTYPE_PIPE );
#if FOUNDATION_PLATFORM_WINDOWS
	if( pipestream->handle_read && ( ( pipestream->mode & STREAM_IN ) != 0 ) )
	{
		uint64_t total_read = 0;
		do
		{
			unsigned long num_read = 0;
			if( !ReadFile( pipestream->handle_read, pointer_offset( dest, total_read ),
			               (unsigned int)( num - total_read ), &num_read, 0 ) )
			{
				int err = GetLastError();
				if( err == ERROR_BROKEN_PIPE )
				{
					pipestream->eos = true;
					break;
				}
				log_warnf( 0, WARNING_SYSTEM_CALL_FAIL, "Unable to read from pipe: %s (%d)",
				           system_error_message( err ), err );
			}
			else
			{
				total_read += num_read;
			}
		} while( total_read < num );
		return total_read;
	}
#elif FOUNDATION_PLATFORM_POSIX || FOUNDATION_PLATFORM_PNACL
	if( pipestream->fd_read && ( ( pipestream->mode & STREAM_IN ) != 0 ) )
	{
		uint64_t total_read = 0;
		do
		{
			ssize_t num_read = read( pipestream->fd_read, pointer_offset( dest, total_read ),
			                         (size_t)( num - total_read ) );
			if( num_read <= 0 )
			{
				pipestream->eos = true;
				break;
			}
			total_read += num_read;
		} while( total_read < num );
		return total_read;
	}
#endif
	return 0;
}
static uint64_t _ringbuffer_stream_read( stream_t* stream, void* dest, uint64_t num )
{
	stream_ringbuffer_t* rbstream = (stream_ringbuffer_t*)stream;
	ringbuffer_t* buffer = RINGBUFFER_FROM_STREAM( rbstream );
	unsigned int num_read = ringbuffer_read( buffer, dest, (unsigned int)num );
	while( num_read < num )
	{
		rbstream->pending_read = 1;
		if( rbstream->pending_write )
			semaphore_post( &rbstream->signal_read );
		semaphore_wait( &rbstream->signal_write );
		rbstream->pending_read = 0;
		num_read += ringbuffer_read( buffer, dest ? pointer_offset( dest, num_read ) : 0,
		                             (unsigned int)( num - num_read ) );
	}
	if( rbstream->pending_write )
		semaphore_post( &rbstream->signal_read );
	return num_read;
}
static void* _memory_guard_verify(void* memory) {
	int guard_loop;
	size_t size = *(size_t*)pointer_offset(memory, -FOUNDATION_MAX_ALIGN * 2);
	uint32_t* guard_header = pointer_offset(memory, -FOUNDATION_MAX_ALIGN);
	uint32_t* guard_footer = pointer_offset(memory, size);
	for (guard_loop = 0; guard_loop < FOUNDATION_MAX_ALIGN / 4; ++guard_loop) {
		if (*guard_header != MEMORY_GUARD_VALUE)
			FOUNDATION_ASSERT_MSG(*guard_header == MEMORY_GUARD_VALUE, "Memory underwrite");
		if (*guard_footer != MEMORY_GUARD_VALUE)
			FOUNDATION_ASSERT_MSG(*guard_footer == MEMORY_GUARD_VALUE, "Memory overwrite");
		guard_header++;
		guard_footer++;
	}
	return pointer_offset(memory, -FOUNDATION_MAX_ALIGN * 2);
}
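/*
 * Memory layout implied by the two guard helpers above (derived from the offsets they
 * compute, not a documented contract). The raw allocation is size + 3 * FOUNDATION_MAX_ALIGN
 * bytes:
 *
 *   [stored size_t size / padding][header guard words][user payload, size bytes][footer guard words]
 *   ^ raw block                   ^ +FOUNDATION_MAX_ALIGN                       ^ +2 * FOUNDATION_MAX_ALIGN + size
 *
 * _memory_guard_initialize() fills both guard blocks with MEMORY_GUARD_VALUE and returns the
 * payload pointer (raw + 2 * FOUNDATION_MAX_ALIGN); _memory_guard_verify() asserts that the
 * guard words are intact (underwrite/overwrite detection) and returns the raw block pointer
 * so it can be handed back to the underlying deallocator.
 */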
static void _atomic_allocate_initialize( uint64_t storagesize )
{
	if( storagesize < 1024 )
		storagesize = BUILD_SIZE_TEMPORARY_MEMORY;
	_memory_temporary.storage  = memory_allocate( storagesize, FOUNDATION_PLATFORM_POINTER_SIZE, MEMORY_PERSISTENT );
	_memory_temporary.end      = pointer_offset( _memory_temporary.storage, storagesize );
	_memory_temporary.head     = _memory_temporary.storage;
	_memory_temporary.size     = storagesize;
	_memory_temporary.maxchunk = ( storagesize / 8 );
}
static void _atomic_allocate_initialize( uint64_t storagesize )
{
	if( storagesize < 1024 )
		storagesize = BUILD_SIZE_TEMPORARY_MEMORY;
	_memory_temporary.storage  = memory_allocate( 0, storagesize, 16, MEMORY_PERSISTENT );
	_memory_temporary.end      = pointer_offset( _memory_temporary.storage, storagesize );
	_memory_temporary.size     = storagesize;
	_memory_temporary.maxchunk = ( storagesize / 8 );
	atomic_storeptr( &_memory_temporary.head, _memory_temporary.storage );
}
network_poll_t* network_poll_allocate(unsigned int num_sockets) {
	network_poll_t* poll;
	size_t memsize = sizeof(network_poll_t) + sizeof(network_poll_slot_t) * num_sockets;
#if FOUNDATION_PLATFORM_APPLE
	memsize += sizeof(struct pollfd) * num_sockets;
#elif FOUNDATION_PLATFORM_LINUX || FOUNDATION_PLATFORM_ANDROID
	memsize += sizeof(struct epoll_event) * num_sockets;
#endif
	poll = memory_allocate(HASH_NETWORK, memsize, 8, MEMORY_PERSISTENT | MEMORY_ZERO_INITIALIZED);
	poll->max_sockets = num_sockets;
#if FOUNDATION_PLATFORM_APPLE
	poll->pollfds = pointer_offset(poll->slots, sizeof(network_poll_slot_t) * num_sockets);
#elif FOUNDATION_PLATFORM_LINUX || FOUNDATION_PLATFORM_ANDROID
	poll->events = pointer_offset(poll->slots, sizeof(network_poll_slot_t) * num_sockets);
	poll->fd_poll = epoll_create(num_sockets);
#endif
	return poll;
}
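/*
 * Both poll functions above assume a single-allocation layout where the per-socket slot
 * array and the platform event array follow the network_poll_t header directly (inferred
 * from the offsets being computed, not a documented contract):
 *
 *   [network_poll_t][network_poll_slot_t x max_sockets][struct pollfd / struct epoll_event x max_sockets]
 *                    ^ poll->slots                      ^ poll->pollfds (Apple) / poll->events (Linux, Android)
 */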
static void* _atomic_allocate_linear(size_t chunksize) {
	void* old_head;
	void* new_head;
	void* return_pointer = 0;
	do {
		//Atomically bump the head of the temporary storage block
		old_head = atomic_loadptr(&_memory_temporary.head);
		new_head = pointer_offset(old_head, chunksize);
		return_pointer = old_head;
		//Wrap around to the start of the storage block if the chunk does not fit before the end
		if (new_head > _memory_temporary.end) {
			new_head = pointer_offset(_memory_temporary.storage, chunksize);
			return_pointer = _memory_temporary.storage;
		}
	} while (!atomic_cas_ptr(&_memory_temporary.head, new_head, old_head));
	return return_pointer;
}
static void _atomic_allocate_initialize(size_t storagesize) {
	if (storagesize < 1024)
		storagesize = _foundation_config.temporary_memory;
	if (!storagesize) {
		memset(&_memory_temporary, 0, sizeof(_memory_temporary));
		return;
	}
	_memory_temporary.storage = memory_allocate(0, storagesize, 16, MEMORY_PERSISTENT);
	_memory_temporary.end = pointer_offset(_memory_temporary.storage, storagesize);
	_memory_temporary.size = storagesize;
	_memory_temporary.maxchunk = (storagesize / 8);
	atomic_storeptr(&_memory_temporary.head, _memory_temporary.storage);
}
static uint64_t _buffer_stream_read( stream_t* stream, void* dest, uint64_t num )
{
	stream_buffer_t* buffer_stream = (stream_buffer_t*)stream;
	uint64_t available = buffer_stream->size - buffer_stream->current;
	uint64_t num_read = ( num < available ) ? num : available;
	if( num_read > 0 )
	{
		memcpy( dest, pointer_offset( buffer_stream->buffer, buffer_stream->current ), (size_t)num_read );
		buffer_stream->current += num_read;
		return num_read;
	}
	return 0;
}
static size_t _buffer_stream_read(stream_t* stream, void* dest, size_t num) {
	size_t available, num_read;
	stream_buffer_t* buffer_stream = (stream_buffer_t*)stream;
	FOUNDATION_ASSERT(buffer_stream->size >= buffer_stream->current);
	available = buffer_stream->size - buffer_stream->current;
	num_read = (num < available) ? num : available;
	if (num_read > 0) {
		memcpy(dest, pointer_offset(buffer_stream->buffer, buffer_stream->current), num_read);
		buffer_stream->current += num_read;
		return num_read;
	}
	return 0;
}
void genprim_pointer_methods(compile_t* c, reach_type_t* t)
{
  ast_t* typeargs = ast_childidx(t->ast, 2);
  ast_t* typearg = ast_child(typeargs);
  reach_type_t* t_elem = reach_type(c->reach, typearg);

  pointer_create(c, t);
  pointer_alloc(c, t, t_elem);
  pointer_realloc(c, t, t_elem);
  pointer_unsafe(c, t);
  pointer_apply(c, t, t_elem);
  pointer_update(c, t, t_elem);
  pointer_offset(c, t, t_elem);
  pointer_insert(c, t, t_elem);
  pointer_delete(c, t, t_elem);
  pointer_copy_to(c, t, t_elem);
  pointer_usize(c, t);
}
unsigned int ringbuffer_read( ringbuffer_t* buffer, void* dest, unsigned int num )
{
	unsigned int do_read;
	unsigned int max_read;
	unsigned int buffer_size;
	unsigned int offset_read;
	unsigned int offset_write;

	FOUNDATION_ASSERT( buffer );

	buffer_size = buffer->buffer_size;
	offset_read = buffer->offset_read;
	offset_write = buffer->offset_write;

	if( offset_read > offset_write )
		max_read = buffer_size - offset_read;
	else
		max_read = offset_write - offset_read;

	do_read = num;
	if( do_read > max_read )
		do_read = max_read;

	if( !do_read )
		return 0;

	if( dest )
		memcpy( dest, buffer->buffer + offset_read, do_read );

	offset_read += do_read;
	if( offset_read == buffer_size )
		offset_read = 0;

	buffer->offset_read = offset_read;
	buffer->total_read += do_read;

	if( ( do_read < num ) && ( offset_read == 0 ) && ( offset_write > 0 ) )
		do_read += ringbuffer_read( buffer, pointer_offset( dest, do_read ), num - do_read );

	return do_read;
}
static size_t _buffer_stream_write(stream_t* stream, const void* source, size_t num) {
	size_t available, want, num_write;
	stream_buffer_t* buffer_stream = (stream_buffer_t*)stream;
	FOUNDATION_ASSERT(buffer_stream->size >= buffer_stream->current);
	available = buffer_stream->size - buffer_stream->current;
	want = num;
	if (want > available) {
		if (buffer_stream->capacity >= (buffer_stream->current + want)) {
			available = want;
			buffer_stream->size = buffer_stream->current + want;
		}
		else if (buffer_stream->grow) {
			size_t prev_capacity = buffer_stream->capacity;
			available = want;
			buffer_stream->size = buffer_stream->current + want;
			buffer_stream->capacity = (buffer_stream->size < 1024) ? 1024 : buffer_stream->size + 1024;
			//tail segment from current to size will be overwritten
			buffer_stream->buffer = memory_reallocate(buffer_stream->buffer, buffer_stream->capacity, 0,
			                                          prev_capacity);
		}
		else {
			available = buffer_stream->capacity - buffer_stream->current;
			buffer_stream->size = buffer_stream->capacity;
		}
	}
	buffer_stream->lastmod = time_current();
	num_write = (want < available) ? want : available;
	if (num_write > 0) {
		memcpy(pointer_offset(buffer_stream->buffer, buffer_stream->current), source, num_write);
		buffer_stream->current += num_write;
		return num_write;
	}
	return 0;
}
exprt ssa_alias_value(
  const exprt &e1,
  const exprt &e2,
  const namespacet &ns)
{
  const typet &e1_type=ns.follow(e1.type());
  const typet &e2_type=ns.follow(e2.type());

  // type matches?
  if(e1_type==e2_type)
    return e2;

  exprt a1=address_canonizer(address_of_exprt(e1), ns);
  exprt a2=address_canonizer(address_of_exprt(e2), ns);

  exprt offset1=pointer_offset(a1);

  // array index possible?
  if(e2_type.id()==ID_array &&
     e1_type==ns.follow(e2_type.subtype()))
  {
    // this assumes well-alignedness
    mp_integer element_size=pointer_offset_size(e2_type.subtype(), ns);

    if(element_size==1)
      return index_exprt(e2, offset1, e1.type());
    else if(element_size>1)
    {
      exprt index=
        div_exprt(offset1, from_integer(element_size, offset1.type()));
      return index_exprt(e2, index, e1.type());
    }
  }

  byte_extract_exprt byte_extract(byte_extract_id(), e1.type());
  byte_extract.op()=e2;
  byte_extract.offset()=offset1;

  return byte_extract;
}
static uint64_t _buffer_stream_write( stream_t* stream, const void* source, uint64_t num )
{
	stream_buffer_t* buffer_stream = (stream_buffer_t*)stream;
	uint64_t available = buffer_stream->size - buffer_stream->current;
	uint64_t want = num;
	uint64_t num_write;
	if( want > available )
	{
		if( buffer_stream->capacity >= ( buffer_stream->current + want ) )
		{
			available = want;
			buffer_stream->size = buffer_stream->current + want;
		}
		else if( buffer_stream->grow )
		{
			available = want;
			buffer_stream->size = buffer_stream->current + want;
			buffer_stream->capacity = ( buffer_stream->size < 1024 ) ? 1024 : buffer_stream->size + 1024;
			//tail segment from current to size will be overwritten
			buffer_stream->buffer = memory_reallocate( buffer_stream->buffer, buffer_stream->capacity, 0,
			                                           buffer_stream->current );
		}
		else
		{
			available = buffer_stream->capacity - buffer_stream->current;
			buffer_stream->size = buffer_stream->capacity;
		}
	}
	num_write = ( want < available ) ? want : available;
	if( num_write > 0 )
	{
		memcpy( pointer_offset( buffer_stream->buffer, buffer_stream->current ), source, (size_t)num_write );
		buffer_stream->current += num_write;
		return num_write;
	}
	return 0;
}
void* render_vertexbuffer_element(object_t id, size_t element) {
	render_vertexbuffer_t* buffer = GET_BUFFER(id);
	return pointer_offset(buffer->access, buffer->size * element);
}
void blowfish_decrypt( const blowfish_t* blowfish, void* data, unsigned int length, const blowfish_mode_t mode, const uint64_t vec )
{
	uint32_t* RESTRICT cur;
	uint32_t* RESTRICT end;
	uint32_t chain[2];
	uint32_t prev_chain[2];
	uint32_t swap_chain[2];

	if( length % 8 )
		length -= ( length % 8 );

	if( !data || !length )
		return;

	/*lint --e{826} */
	cur = data;
	end = pointer_offset( data, length );

	chain[0] = (uint32_t)( ( vec >> 32ULL ) & 0xFFFFFFFFU );
	chain[1] = (uint32_t)( vec & 0xFFFFFFFFU );

	switch( mode )
	{
		case BLOWFISH_ECB:
		{
			for( ; cur < end; cur += 2 )
				_blowfish_decrypt_words( blowfish, cur, cur + 1 );
			break;
		}

		case BLOWFISH_CBC:
		{
			for( ; cur < end; cur += 2 )
			{
				prev_chain[0] = cur[0];
				prev_chain[1] = cur[1];
				_blowfish_decrypt_words( blowfish, cur, cur + 1 );
				cur[0] ^= chain[0];
				cur[1] ^= chain[1];
				swap_chain[0] = chain[0];
				swap_chain[1] = chain[1];
				chain[0] = prev_chain[0];
				chain[1] = prev_chain[1];
				prev_chain[0] = swap_chain[0];
				prev_chain[1] = swap_chain[1];
			}
			break;
		}

		case BLOWFISH_CFB:
		{
			for( ; cur < end; cur += 2 )
			{
				prev_chain[0] = cur[0];
				prev_chain[1] = cur[1];
				_blowfish_encrypt_words( blowfish, chain, chain + 1 );
				cur[0] ^= chain[0];
				cur[1] ^= chain[1];
				swap_chain[0] = chain[0];
				swap_chain[1] = chain[1];
				chain[0] = prev_chain[0];
				chain[1] = prev_chain[1];
				prev_chain[0] = swap_chain[0];
				prev_chain[1] = swap_chain[1];
			}
			break;
		}

		case BLOWFISH_OFB:
		{
			for( ; cur < end; cur += 2 )
			{
				_blowfish_encrypt_words( blowfish, chain, chain + 1 );
				cur[0] ^= chain[0];
				cur[1] ^= chain[1];
			}
			break;
		}

		default:
			break;
	}

	//Reset memory for paranoids
	/*lint --e{438} */
	chain[0] = 0; chain[1] = 0;
	prev_chain[0] = 0; prev_chain[1] = 0;
	swap_chain[0] = 0; swap_chain[1] = 0;
}
static void* blast_reader_map(blast_reader_t* reader, uint64_t offset, int size) {
	FOUNDATION_UNUSED(size);
	return pointer_offset(reader->data, offset);
}
bool simplify_exprt::simplify_pointer_offset(exprt &expr)
{
  if(expr.operands().size()!=1)
    return true;

  exprt &ptr=expr.op0();

  if(ptr.id()==ID_if && ptr.operands().size()==3)
  {
    if_exprt if_expr=lift_if(expr, 0);
    simplify_pointer_offset(if_expr.true_case());
    simplify_pointer_offset(if_expr.false_case());
    simplify_if(if_expr);
    expr.swap(if_expr);

    return false;
  }

  if(ptr.type().id()!=ID_pointer)
    return true;

  if(ptr.id()==ID_address_of)
  {
    if(ptr.operands().size()!=1)
      return true;

    mp_integer offset=compute_pointer_offset(ptr.op0(), ns);

    if(offset!=-1)
    {
      expr=from_integer(offset, expr.type());
      return false;
    }
  }
  else if(ptr.id()==ID_typecast) // pointer typecast
  {
    if(ptr.operands().size()!=1)
      return true;

    const typet &op_type=ns.follow(ptr.op0().type());

    if(op_type.id()==ID_pointer)
    {
      // Cast from pointer to pointer.
      // This just passes through, remove typecast.
      exprt tmp=ptr.op0();
      ptr=tmp;

      // recursive call
      simplify_node(expr);
      return false;
    }
    else if(op_type.id()==ID_signedbv ||
            op_type.id()==ID_unsignedbv)
    {
      // Cast from integer to pointer, say (int *)x.

      if(ptr.op0().is_constant())
      {
        // (T *)0x1234 -> 0x1234
        exprt tmp=ptr.op0();
        tmp.make_typecast(expr.type());
        simplify_node(tmp);
        expr.swap(tmp);
        return false;
      }
      else
      {
        // We do a bit of special treatment for (TYPE *)(a+(int)&o),
        // which is re-written to 'a'.

        typet type=ns.follow(expr.type());
        exprt tmp=ptr.op0();
        if(tmp.id()==ID_plus && tmp.operands().size()==2)
        {
          if(tmp.op0().id()==ID_typecast &&
             tmp.op0().operands().size()==1 &&
             tmp.op0().op0().id()==ID_address_of)
          {
            expr=tmp.op1();
            if(type!=expr.type())
              expr.make_typecast(type);

            simplify_node(expr);
            return false;
          }
          else if(tmp.op1().id()==ID_typecast &&
                  tmp.op1().operands().size()==1 &&
                  tmp.op1().op0().id()==ID_address_of)
          {
            expr=tmp.op0();
            if(type!=expr.type())
              expr.make_typecast(type);

            simplify_node(expr);
            return false;
          }
        }
      }
    }
  }
  else if(ptr.id()==ID_plus) // pointer arithmetic
  {
    exprt::operandst ptr_expr;
    exprt::operandst int_expr;

    for(const auto & op : ptr.operands())
    {
      if(op.type().id()==ID_pointer)
        ptr_expr.push_back(op);
      else if(!op.is_zero())
      {
        exprt tmp=op;
        if(tmp.type()!=expr.type())
        {
          tmp.make_typecast(expr.type());
          simplify_node(tmp);
        }

        int_expr.push_back(tmp);
      }
    }

    if(ptr_expr.size()!=1 || int_expr.empty())
      return true;

    typet pointer_type=ptr_expr.front().type();

    mp_integer element_size=
      pointer_offset_size(pointer_type.subtype(), ns);

    if(element_size==0)
      return true;

    // this might change the type of the pointer!
    exprt pointer_offset(ID_pointer_offset, expr.type());
    pointer_offset.copy_to_operands(ptr_expr.front());
    simplify_node(pointer_offset);

    exprt sum;

    if(int_expr.size()==1)
      sum=int_expr.front();
    else
    {
      sum=exprt(ID_plus, expr.type());
      sum.operands()=int_expr;
    }

    simplify_node(sum);

    exprt size_expr=
      from_integer(element_size, expr.type());

    binary_exprt product(sum, ID_mult, size_expr, expr.type());

    simplify_node(product);

    expr=binary_exprt(pointer_offset, ID_plus, product, expr.type());

    simplify_node(expr);

    return false;
  }
  else if(ptr.id()==ID_constant &&
          ptr.get(ID_value)==ID_NULL)
  {
    expr=gen_zero(expr.type());

    simplify_node(expr);

    return false;
  }

  return true;
}
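// Worked example of the ID_plus (pointer arithmetic) branch above, illustrative only:
// for `int *p` on a target where pointer_offset_size() of int is 4, the expression
// POINTER_OFFSET(p + 3) simplifies to POINTER_OFFSET(p) + 3 * 4, i.e. the integer
// summand is scaled by the size of the pointee type before being added to the offset
// of the pointer operand.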
static void* _memory_reallocate_malloc(void* p, size_t size, unsigned int align, size_t oldsize) {
#if ( FOUNDATION_SIZE_POINTER == 4 ) && FOUNDATION_PLATFORM_WINDOWS
	FOUNDATION_UNUSED(oldsize);
	align = _memory_get_align(align);
# if BUILD_ENABLE_MEMORY_GUARD
	if (p) {
		p = _memory_guard_verify(p);
		p = _aligned_realloc(p, (size_t)size + FOUNDATION_MAX_ALIGN * 3, align);
	}
	else {
		p = _aligned_malloc((size_t)size + FOUNDATION_MAX_ALIGN * 3, align);
	}
	if (p)
		p = _memory_guard_initialize(p, (size_t)size);
	return p;
# else
	return _aligned_realloc(p, (size_t)size, align);
# endif
#else
	void* memory;
	void* raw_p;

	align = _memory_get_align(align);

	memory = p;
# if BUILD_ENABLE_MEMORY_GUARD
	if (memory)
		memory = _memory_guard_verify(memory);
# endif
	raw_p = memory ? *((void**)memory - 1) : nullptr;
	memory = nullptr;

#if FOUNDATION_PLATFORM_WINDOWS
	if (raw_p && !((uintptr_t)raw_p & 1)) {
		size_t padding = (align > FOUNDATION_SIZE_POINTER ? align : FOUNDATION_SIZE_POINTER);
# if BUILD_ENABLE_MEMORY_GUARD
		size_t extra_padding = FOUNDATION_MAX_ALIGN * 3;
# else
		size_t extra_padding = 0;
# endif
		void* raw_memory = _aligned_realloc(raw_p, size + padding + extra_padding, align ? align : 8);
		if (raw_memory) {
			memory = pointer_offset(raw_memory, padding);
			*((void**)memory - 1) = raw_memory;
# if BUILD_ENABLE_MEMORY_GUARD
			memory = _memory_guard_initialize(memory, size);
# endif
		}
	}
	else {
# if FOUNDATION_SIZE_POINTER == 4
		memory = _memory_allocate_malloc_raw(size, align, 0U);
# else
		memory = _memory_allocate_malloc_raw(size, align,
		                                     (raw_p && ((uintptr_t)raw_p < 0xFFFFFFFFULL)) ? MEMORY_32BIT_ADDRESS : 0U);
# endif
		if (p && memory && oldsize)
			memcpy(memory, p, (size < oldsize) ? size : oldsize);
		_memory_deallocate_malloc(p);
	}
#else //!FOUNDATION_PLATFORM_WINDOWS
	//If we're on ARM the realloc can return a 16-bit aligned address, causing raw pointer store to SIGILL
	//Realigning does not work since the realloc memory copy preserve cannot be done properly. Revert to normal alloc-and-copy
	//Same with alignment, since we cannot guarantee that the returned memory block offset from start of actual memory block
	//is the same in the reallocated block as the original block, we need to alloc-and-copy to get alignment
	//Memory guard introduces implicit alignments as well so alloc-and-copy for that
#if !FOUNDATION_ARCH_ARM && !FOUNDATION_ARCH_ARM_64 && !BUILD_ENABLE_MEMORY_GUARD
	if (!align && raw_p && !((uintptr_t)raw_p & 1)) {
		void* raw_memory = realloc(raw_p, (size_t)size + FOUNDATION_SIZE_POINTER);
		if (raw_memory) {
			*(void**)raw_memory = raw_memory;
			memory = pointer_offset(raw_memory, FOUNDATION_SIZE_POINTER);
		}
	}
	else
#endif
	{
# if FOUNDATION_SIZE_POINTER == 4
#  if !BUILD_ENABLE_LOG
		FOUNDATION_UNUSED(raw_p);
#  endif
		memory = _memory_allocate_malloc_raw(size, align, 0U);
# else
		memory = _memory_allocate_malloc_raw(size, align,
		                                     (raw_p && ((uintptr_t)raw_p < 0xFFFFFFFFULL)) ? MEMORY_32BIT_ADDRESS : 0U);
# endif
		if (p && memory && oldsize)
			memcpy(memory, p, (size < oldsize) ? (size_t)size : (size_t)oldsize);
		_memory_deallocate_malloc(p);
	}
#endif

	if (!memory) {
		string_const_t errmsg = system_error_message(0);
		log_panicf(HASH_MEMORY, ERROR_OUT_OF_MEMORY,
		           STRING_CONST("Unable to reallocate memory (%" PRIsize " -> %" PRIsize " @ 0x%" PRIfixPTR
		                        ", raw 0x%" PRIfixPTR "): %.*s"),
		           oldsize, size, (uintptr_t)p, (uintptr_t)raw_p, STRING_FORMAT(errmsg));
	}
	return memory;
#endif
}
static void* _memory_allocate_malloc_raw(size_t size, unsigned int align, unsigned int hint) {
	FOUNDATION_UNUSED(hint);

	//If we align manually, we must be able to retrieve the original pointer for passing to free()
	//Thus all allocations need to go through that path
#if FOUNDATION_PLATFORM_WINDOWS
# if FOUNDATION_SIZE_POINTER == 4
#  if BUILD_ENABLE_MEMORY_GUARD
	char* memory = _aligned_malloc((size_t)size + FOUNDATION_MAX_ALIGN * 3, align);
	if (memory)
		memory = _memory_guard_initialize(memory, (size_t)size);
	return memory;
#  else
	return _aligned_malloc((size_t)size, align);
#  endif
# else
	unsigned int padding, extra_padding = 0;
	size_t allocate_size;
	char* raw_memory;
	void* memory;
	long vmres;

	if (!(hint & MEMORY_32BIT_ADDRESS)) {
		padding = (align > FOUNDATION_SIZE_POINTER ? align : FOUNDATION_SIZE_POINTER);
#if BUILD_ENABLE_MEMORY_GUARD
		extra_padding = FOUNDATION_MAX_ALIGN * 3;
#endif
		raw_memory = _aligned_malloc((size_t)size + padding + extra_padding, align);
		if (raw_memory) {
			//Will be aligned since padding is multiple of alignment (minimum align/pad is pointer size)
			memory = raw_memory + padding;
			*((void**)memory - 1) = raw_memory;
			FOUNDATION_ASSERT(!((uintptr_t)raw_memory & 1));
			FOUNDATION_ASSERT(!((uintptr_t)memory & 1));
#if BUILD_ENABLE_MEMORY_GUARD
			memory = _memory_guard_initialize(memory, size);
			FOUNDATION_ASSERT(!((uintptr_t)memory & 1));
#endif
			return memory;
		}
		log_errorf(HASH_MEMORY, ERROR_OUT_OF_MEMORY,
		           STRING_CONST("Unable to allocate %" PRIsize " bytes of memory"), size);
		return 0;
	}

# if BUILD_ENABLE_MEMORY_GUARD
	extra_padding = FOUNDATION_MAX_ALIGN * 3;
# endif

	allocate_size = size + FOUNDATION_SIZE_POINTER + extra_padding + align;
	raw_memory = 0;

	vmres = NtAllocateVirtualMemory(INVALID_HANDLE_VALUE, &raw_memory, 1, &allocate_size,
	                                MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
	if (vmres != 0) {
		log_errorf(HASH_MEMORY, ERROR_OUT_OF_MEMORY,
		           STRING_CONST("Unable to allocate %" PRIsize " bytes of memory in low 32bit address space"), size);
		return 0;
	}

	memory = _memory_align_pointer(raw_memory + FOUNDATION_SIZE_POINTER, align);
	*((void**)memory - 1) = (void*)((uintptr_t)raw_memory | 1);
# if BUILD_ENABLE_MEMORY_GUARD
	memory = _memory_guard_initialize(memory, size);
# endif
	FOUNDATION_ASSERT(!((uintptr_t)raw_memory & 1));
	FOUNDATION_ASSERT(!((uintptr_t)memory & 1));
	return memory;
# endif

#else

# if FOUNDATION_SIZE_POINTER > 4
	if (!(hint & MEMORY_32BIT_ADDRESS))
# endif
	{
#if BUILD_ENABLE_MEMORY_GUARD
		size_t extra_padding = FOUNDATION_MAX_ALIGN * 3;
#else
		size_t extra_padding = 0;
#endif
		size_t allocate_size = size + align + FOUNDATION_SIZE_POINTER + extra_padding;
		char* raw_memory = malloc(allocate_size);
		if (raw_memory) {
			void* memory = _memory_align_pointer(raw_memory + FOUNDATION_SIZE_POINTER, align);
			*((void**)memory - 1) = raw_memory;
			FOUNDATION_ASSERT(!((uintptr_t)raw_memory & 1));
			FOUNDATION_ASSERT(!((uintptr_t)memory & 1));
#if BUILD_ENABLE_MEMORY_GUARD
			memory = _memory_guard_initialize(memory, size);
			FOUNDATION_ASSERT(!((uintptr_t)memory & 1));
#endif
			return memory;
		}
		log_errorf(HASH_MEMORY, ERROR_OUT_OF_MEMORY,
		           STRING_CONST("Unable to allocate %" PRIsize " bytes of memory (%" PRIsize " requested)"),
		           size, allocate_size);
		return 0;
	}

# if FOUNDATION_SIZE_POINTER > 4

	size_t allocate_size;
	char* raw_memory;
	void* memory;
# if BUILD_ENABLE_MEMORY_GUARD
	unsigned int extra_padding = FOUNDATION_MAX_ALIGN * 3;
#else
	unsigned int extra_padding = 0;
# endif

	allocate_size = size + align + FOUNDATION_SIZE_POINTER * 2 + extra_padding;

#ifndef MAP_UNINITIALIZED
#define MAP_UNINITIALIZED 0
#endif

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

# ifndef MAP_32BIT
	//On MacOSX app needs to be linked with -pagezero_size 10000 -image_base 100000000 to
	// 1) Free up low 4Gb address range by reducing page zero size
	// 2) Move executable base address above 4Gb to free up more memory address space
#define MMAP_REGION_START ((uintptr_t)0x10000)
#define MMAP_REGION_END   ((uintptr_t)0x80000000)
	static atomicptr_t baseaddr = { (void*)MMAP_REGION_START };
	bool retried = false;
	do {
		raw_memory = mmap(atomic_loadptr(&baseaddr), allocate_size, PROT_READ | PROT_WRITE,
		                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED, -1, 0);
		if (((uintptr_t)raw_memory >= MMAP_REGION_START) &&
		    (uintptr_t)(raw_memory + allocate_size) < MMAP_REGION_END) {
			atomic_storeptr(&baseaddr, pointer_offset(raw_memory, allocate_size));
			break;
		}
		if (raw_memory && (raw_memory != MAP_FAILED)) {
			if (munmap(raw_memory, allocate_size) < 0)
				log_warn(HASH_MEMORY, WARNING_SYSTEM_CALL_FAIL,
				         STRING_CONST("Failed to munmap pages outside 32-bit range"));
		}
		raw_memory = 0;
		if (retried)
			break;
		retried = true;
		atomic_storeptr(&baseaddr, (void*)MMAP_REGION_START);
	} while (true);
# else
	raw_memory = mmap(0, allocate_size, PROT_READ | PROT_WRITE,
	                  MAP_32BIT | MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED, -1, 0);
	if (raw_memory == MAP_FAILED) {
		raw_memory = mmap(0, allocate_size, PROT_READ | PROT_WRITE,
		                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED, -1, 0);
		if (raw_memory == MAP_FAILED)
			raw_memory = 0;
		if ((uintptr_t)raw_memory > 0xFFFFFFFFULL) {
			if (munmap(raw_memory, allocate_size) < 0)
				log_warn(HASH_MEMORY, WARNING_SYSTEM_CALL_FAIL,
				         STRING_CONST("Failed to munmap pages outside 32-bit range"));
			raw_memory = 0;
		}
	}
# endif

	if (!raw_memory) {
		string_const_t errmsg = system_error_message(0);
		log_errorf(HASH_MEMORY, ERROR_OUT_OF_MEMORY,
		           STRING_CONST("Unable to allocate %" PRIsize " bytes of memory in low 32bit address space: %.*s"),
		           size, STRING_FORMAT(errmsg));
		return 0;
	}

	memory = _memory_align_pointer(raw_memory + FOUNDATION_SIZE_POINTER * 2, align);
	*((uintptr_t*)memory - 1) = ((uintptr_t)raw_memory | 1);
	*((uintptr_t*)memory - 2) = (uintptr_t)allocate_size;
	FOUNDATION_ASSERT(!((uintptr_t)raw_memory & 1));
	FOUNDATION_ASSERT(!((uintptr_t)memory & 1));
# if BUILD_ENABLE_MEMORY_GUARD
	memory = _memory_guard_initialize(memory, size);
	FOUNDATION_ASSERT(!((uintptr_t)memory & 1));
# endif
	return memory;
# endif

#endif
}
value_set_dereferencet::valuet value_set_dereferencet::build_reference_to(
  const exprt &what,
  const modet mode,
  const exprt &pointer_expr,
  const guardt &guard)
{
  const typet &dereference_type=
    ns.follow(pointer_expr.type()).subtype();

  if(what.id()==ID_unknown ||
     what.id()==ID_invalid)
  {
    invalid_pointer(pointer_expr, guard);
    return valuet();
  }

  if(what.id()!=ID_object_descriptor)
    throw "unknown points-to: "+what.id_string();

  const object_descriptor_exprt &o=to_object_descriptor_expr(what);

  const exprt &root_object=o.root_object();
  const exprt &object=o.object();

  #if 0
  std::cout << "O: " << from_expr(ns, "", root_object) << '\n';
  #endif

  valuet result;

  if(root_object.id()=="NULL-object")
  {
    if(options.get_bool_option("pointer-check"))
    {
      guardt tmp_guard(guard);

      if(o.offset().is_zero())
      {
        tmp_guard.add(null_pointer(pointer_expr));

        dereference_callback.dereference_failure(
          "pointer dereference",
          "NULL pointer", tmp_guard);
      }
      else
      {
        tmp_guard.add(null_object(pointer_expr));

        dereference_callback.dereference_failure(
          "pointer dereference",
          "NULL plus offset pointer", tmp_guard);
      }
    }
  }
  else if(root_object.id()==ID_dynamic_object)
  {
    // const dynamic_object_exprt &dynamic_object=
    //   to_dynamic_object_expr(root_object);

    // the object produced by malloc
    exprt malloc_object=
      ns.lookup(CPROVER_PREFIX "malloc_object").symbol_expr();

    exprt is_malloc_object=same_object(pointer_expr, malloc_object);

    // constraint that it actually is a dynamic object
    exprt dynamic_object_expr(ID_dynamic_object, bool_typet());
    dynamic_object_expr.copy_to_operands(pointer_expr);

    // this is also our guard
    result.pointer_guard=dynamic_object_expr;

    // can't remove here, turn into *p
    result.value=dereference_exprt(pointer_expr, dereference_type);

    if(options.get_bool_option("pointer-check"))
    {
      // if(!dynamic_object.valid().is_true())
      {
        // check if it is still alive
        guardt tmp_guard(guard);
        tmp_guard.add(deallocated(pointer_expr, ns));
        dereference_callback.dereference_failure(
          "pointer dereference",
          "dynamic object deallocated",
          tmp_guard);
      }

      if(options.get_bool_option("bounds-check"))
      {
        if(!o.offset().is_zero())
        {
          // check lower bound
          guardt tmp_guard(guard);
          tmp_guard.add(is_malloc_object);
          tmp_guard.add(
            dynamic_object_lower_bound(
              pointer_expr,
              ns,
              nil_exprt()));
          dereference_callback.dereference_failure(
            "pointer dereference",
            "dynamic object lower bound", tmp_guard);
        }

        {
          // check upper bound

          // we check SAME_OBJECT(__CPROVER_malloc_object, p) &&
          //          POINTER_OFFSET(p)+size>__CPROVER_malloc_size

          guardt tmp_guard(guard);
          tmp_guard.add(is_malloc_object);
          tmp_guard.add(
            dynamic_object_upper_bound(
              pointer_expr,
              dereference_type,
              ns,
              size_of_expr(dereference_type, ns)));
          dereference_callback.dereference_failure(
            "pointer dereference",
            "dynamic object upper bound", tmp_guard);
        }
      }
    }
  }
  else if(root_object.id()==ID_integer_address)
  {
    // This is stuff like *((char *)5).
    // This is turned into an access to __CPROVER_memory[...].

    if(language_mode==ID_java)
    {
      result.value=nil_exprt();
      return result;
    }

    const symbolt &memory_symbol=ns.lookup(CPROVER_PREFIX "memory");
    exprt symbol_expr=symbol_exprt(memory_symbol.name, memory_symbol.type);

    if(base_type_eq(
         ns.follow(memory_symbol.type).subtype(),
         dereference_type, ns))
    {
      // Types match already, what a coincidence!
      // We can use an index expression.
      exprt index_expr=index_exprt(symbol_expr, pointer_offset(pointer_expr));
      index_expr.type()=ns.follow(memory_symbol.type).subtype();
      result.value=index_expr;
    }
    else if(dereference_type_compare(
              ns.follow(memory_symbol.type).subtype(),
              dereference_type))
    {
      exprt index_expr=index_exprt(symbol_expr, pointer_offset(pointer_expr));
      index_expr.type()=ns.follow(memory_symbol.type).subtype();
      result.value=typecast_exprt(index_expr, dereference_type);
    }
    else
    {
      // We need to use byte_extract.
      // Won't do this without a commitment to an endianness.

      if(config.ansi_c.endianness==configt::ansi_ct::endiannesst::NO_ENDIANNESS)
      {
      }
      else
      {
        exprt byte_extract(byte_extract_id(), dereference_type);
        byte_extract.copy_to_operands(
          symbol_expr, pointer_offset(pointer_expr));
        result.value=byte_extract;
      }
    }
  }
  else
  {
    // something generic -- really has to be a symbol
    address_of_exprt object_pointer(object);

    if(o.offset().is_zero())
    {
      equal_exprt equality(pointer_expr, object_pointer);

      if(ns.follow(equality.lhs().type())!=ns.follow(equality.rhs().type()))
        equality.lhs().make_typecast(equality.rhs().type());

      result.pointer_guard=equality;
    }
    else
    {
      result.pointer_guard=same_object(pointer_expr, object_pointer);
    }

    guardt tmp_guard(guard);
    tmp_guard.add(result.pointer_guard);

    valid_check(object, tmp_guard, mode);

    const typet &object_type=ns.follow(object.type());
    const exprt &root_object=o.root_object();
    const typet &root_object_type=ns.follow(root_object.type());

    exprt root_object_subexpression=root_object;

    if(dereference_type_compare(object_type, dereference_type) &&
       o.offset().is_zero())
    {
      // The simplest case: types match, and offset is zero!
      // This is great, we are almost done.

      result.value=object;

      if(object_type!=ns.follow(dereference_type))
        result.value.make_typecast(dereference_type);
    }
    else if(root_object_type.id()==ID_array &&
            dereference_type_compare(
              root_object_type.subtype(),
              dereference_type))
    {
      // We have an array with a subtype that matches
      // the dereferencing type.
      // We will require well-alignedness!

      exprt offset;

      // this should work as the object is essentially the root object
      if(o.offset().is_constant())
        offset=o.offset();
      else
        offset=pointer_offset(pointer_expr);

      exprt adjusted_offset;

      // are we doing a byte?
      mp_integer element_size=
        dereference_type.id()==ID_empty?
        pointer_offset_size(char_type(), ns):
        pointer_offset_size(dereference_type, ns);

      if(element_size==1)
      {
        // no need to adjust offset
        adjusted_offset=offset;
      }
      else if(element_size<=0)
      {
        throw "unknown or invalid type size of:\n"+dereference_type.pretty();
      }
      else
      {
        exprt element_size_expr=
          from_integer(element_size, offset.type());

        adjusted_offset=binary_exprt(
          offset, ID_div, element_size_expr, offset.type());

        // TODO: need to assert well-alignedness
      }

      index_exprt index_expr=
        index_exprt(root_object, adjusted_offset, root_object_type.subtype());

      bounds_check(index_expr, tmp_guard);

      result.value=index_expr;

      if(ns.follow(result.value.type())!=ns.follow(dereference_type))
        result.value.make_typecast(dereference_type);
    }
    else if(get_subexpression_at_offset(
              root_object_subexpression,
              o.offset(),
              dereference_type,
              ns))
    {
      // Successfully found a member, array index, or combination thereof
      // that matches the desired type and offset:
      result.value=root_object_subexpression;
    }
    else
    {
      // we extract something from the root object
      result.value=o.root_object();

      // this is relative to the root object
      const exprt offset=pointer_offset(pointer_expr);

      if(memory_model(result.value, dereference_type, tmp_guard, offset))
      {
        // ok, done
      }
      else
      {
        if(options.get_bool_option("pointer-check"))
        {
          std::string msg="memory model not applicable (got `";
          msg+=from_type(ns, "", result.value.type());
          msg+="', expected `";
          msg+=from_type(ns, "", dereference_type);
          msg+="')";

          dereference_callback.dereference_failure(
            "pointer dereference",
            msg, tmp_guard);
        }

        return valuet(); // give up, no way that this is ok
      }
    }
  }

  return result;
}
void goto_checkt::bounds_check(
  const index_exprt &expr,
  const guardt &guard)
{
  if(!enable_bounds_check)
    return;

  if(expr.find("bounds_check").is_not_nil() &&
     !expr.get_bool("bounds_check"))
    return;

  typet array_type=ns.follow(expr.array().type());

  if(array_type.id()==ID_pointer)
    return; // done by the pointer code
  else if(array_type.id()==ID_incomplete_array)
    throw "index got incomplete array";
  else if(array_type.id()!=ID_array && array_type.id()!=ID_vector)
    throw "bounds check expected array or vector type, got "
      +array_type.id_string();

  std::string name=array_name(expr.array());

  const exprt &index=expr.index();
  object_descriptor_exprt ode;
  ode.build(expr, ns);

  if(index.type().id()!=ID_unsignedbv)
  {
    // we undo typecasts to signedbv
    if(index.id()==ID_typecast &&
       index.operands().size()==1 &&
       index.op0().type().id()==ID_unsignedbv)
    {
      // ok
    }
    else
    {
      mp_integer i;

      if(!to_integer(index, i) && i>=0)
      {
        // ok
      }
      else
      {
        exprt effective_offset=ode.offset();

        if(ode.root_object().id()==ID_dereference)
        {
          exprt p_offset=pointer_offset(
            to_dereference_expr(ode.root_object()).pointer());
          assert(p_offset.type()==effective_offset.type());

          effective_offset=plus_exprt(p_offset, effective_offset);
        }

        exprt zero=gen_zero(ode.offset().type());
        assert(zero.is_not_nil());

        // the final offset must not be negative
        binary_relation_exprt inequality(effective_offset, ID_ge, zero);

        add_guarded_claim(
          inequality,
          name+" lower bound",
          "array bounds",
          expr.find_source_location(),
          expr,
          guard);
      }
    }
  }

  if(ode.root_object().id()==ID_dereference)
  {
    const exprt &pointer=
      to_dereference_expr(ode.root_object()).pointer();

    if_exprt size(
      dynamic_object(pointer),
      typecast_exprt(dynamic_size(ns), object_size(pointer).type()),
      object_size(pointer));

    plus_exprt effective_offset(ode.offset(), pointer_offset(pointer));

    assert(effective_offset.op0().type()==effective_offset.op1().type());
    assert(effective_offset.type()==size.type());

    binary_relation_exprt inequality(effective_offset, ID_lt, size);

    or_exprt precond(
      and_exprt(
        dynamic_object(pointer),
        not_exprt(malloc_object(pointer, ns))),
      inequality);

    add_guarded_claim(
      precond,
      name+" upper bound",
      "array bounds",
      expr.find_source_location(),
      expr,
      guard);

    return;
  }

  const exprt &size=array_type.id()==ID_array ?
    to_array_type(array_type).size() :
    to_vector_type(array_type).size();

  if(size.is_nil())
  {
    // Linking didn't complete, we don't have a size.
    // Not clear what to do.
  }
  else if(size.id()==ID_infinity)
  {
  }
  else if(size.is_zero() &&
          expr.array().id()==ID_member)
  {
    // a variable sized struct member
  }
  else
  {
    binary_relation_exprt inequality(index, ID_lt, size);

    // typecast size
    if(inequality.op1().type()!=inequality.op0().type())
      inequality.op1().make_typecast(inequality.op0().type());

    add_guarded_claim(
      inequality,
      name+" upper bound",
      "array bounds",
      expr.find_source_location(),
      expr,
      guard);
  }
}