//Query a shader program for a parameter (uniform or uniform block) by name
//and return a newly allocated shaderparameter_t describing it. Caller owns
//the returned allocation (and the cloned name string).
shaderparameter_t*
shader_get_param(shaderprogram_t* program, const char* name, ShaderParamType type) {
	GLint paramIndex = 0;
	GLint paramSize = 0;
	GLenum paramType = 0; //BUGFIX: was uninitialized on the uniform block path
	switch (type) {
		default:
		case ShaderParamType_Uniform: {
			GLsizei nameLength;
			paramIndex = glGetUniformLocation(program->program, name);
			//NOTE(review): glGetActiveUniform takes a uniform *index*, not the
			//location returned by glGetUniformLocation; these only coincide for
			//simple programs - verify with glGetUniformIndices if they can differ
			glGetActiveUniform(program->program, paramIndex, 0, &nameLength, &paramSize, &paramType, nullptr);
			FOUNDATION_ASSERT(paramSize > 0);
			break; //BUGFIX: previously fell through and clobbered index/size with uniform block data
		}
		case ShaderParamType_UniformBlock: {
			paramIndex = glGetUniformBlockIndex(program->program, name);
			glGetActiveUniformBlockiv(program->program, paramIndex, GL_UNIFORM_BLOCK_DATA_SIZE, &paramSize);
			FOUNDATION_ASSERT(paramSize > 0);
			break;
		}
	}
	shaderparameter_t* param = (shaderparameter_t*) memory_allocate(sizeof(shaderparameter_t), 4, MEMORY_PERSISTENT);
	param->index = paramIndex;
	param->size = paramSize;
	param->gltype = paramType;
	param->name = string_clone(name);
	param->type = type;
	return param;
}
//Truncation is meaningless for a socket stream; validate arguments and do nothing.
static void
_socket_stream_truncate(stream_t* stream, size_t size) {
	FOUNDATION_ASSERT(stream);
	FOUNDATION_ASSERT(stream->type == STREAMTYPE_SOCKET);
	FOUNDATION_UNUSED(stream);
	FOUNDATION_UNUSED(size);
}
//Reserve a slot in the object map and allocate a fresh id for it, lock-free.
//Returns the combined handle ((id << size_bits) | slot index), or 0 if the
//map is full. The reserved slot is cleared to null; store the actual object
//pointer afterwards with objectmap_set.
object_t objectmap_reserve( objectmap_t* map )
{
	uint64_t idx, next, id;
	FOUNDATION_ASSERT( map ); /*lint -esym(613,pool) */
	//Reserve spot in array by popping the head of the lock-free free list.
	//Free slots store ((next_free_index << 1) | 1) in the map entry itself.
	//TODO: Look into double-ended implementation with allocation from tail and free push to head
	do
	{
		idx = atomic_load64( &map->free );
		if( idx >= map->size )
		{
			log_error( 0, ERROR_OUT_OF_MEMORY, "Pool full, unable to reserve id" );
			return 0;
		}
		next = ((uintptr_t)map->map[idx]) >> 1; //Shift out the free-list tag bit
	} while( !atomic_cas64( &map->free, next, idx ) );
	//Sanity check that slot isn't taken (low tag bit must still be set)
	FOUNDATION_ASSERT_MSG( (intptr_t)(map->map[idx]) & 1, "Map failed sanity check, slot taken after reserve" );
	map->map[idx] = 0;
	//Allocate ID, skipping the reserved value 0
	id = 0;
	do
	{
		id = atomic_incr64( &map->id ) & map->id_max; //Wrap-around handled by masking
	} while( !id );
	//Make sure id stays within correct bits (if fails, check objectmap allocation and the mask setup there)
	FOUNDATION_ASSERT( ( ( id << map->size_bits ) & map->mask_id ) == ( id << map->size_bits ) );
	return ( id << map->size_bits ) | idx; /*lint +esym(613,pool) */
}
//Socket streams have no determinable total size; always report zero.
static size_t
_socket_stream_size(stream_t* stream) {
	FOUNDATION_ASSERT(stream);
	FOUNDATION_ASSERT(stream->type == STREAMTYPE_SOCKET);
	FOUNDATION_UNUSED(stream);
	return 0;
}
//Flush any buffered output on the socket stream to the underlying socket.
static void
_socket_stream_flush(stream_t* stream) {
	socket_stream_t* sockstream;
	FOUNDATION_ASSERT(stream);
	FOUNDATION_ASSERT(stream->type == STREAMTYPE_SOCKET);
	sockstream = (socket_stream_t*)stream;
	_socket_stream_doflush(sockstream);
}
//Reposition the stream according to offset and direction. Sequential
//(non-seekable) streams silently ignore the request.
void
stream_seek( stream_t* stream, int64_t offset, stream_seek_mode_t direction )
{
	FOUNDATION_ASSERT( stream );
	if( !stream->sequential )
	{
		FOUNDATION_ASSERT( stream->vtable->seek );
		stream->vtable->seek( stream, offset, direction );
	}
}
//Read up to num_bytes from the stream into buffer. Returns the number of
//bytes actually read, or 0 if the stream is not opened for input.
uint64_t
stream_read( stream_t* stream, void* buffer, uint64_t num_bytes )
{
	FOUNDATION_ASSERT( stream );
	if( stream->mode & STREAM_IN )
	{
		FOUNDATION_ASSERT( stream->vtable->read );
		return stream->vtable->read( stream, buffer, num_bytes );
	}
	return 0;
}
//Release one level of the calling thread's (recursive) execution right on
//the lua environment. When the outermost level is released, ownership is
//cleared and the execution semaphore is posted for waiting threads.
void
lua_release_execution_right(lua_t* env) {
	FOUNDATION_ASSERT(atomic_load64(&env->executing_thread) == thread_id());
	FOUNDATION_ASSERT(env->executing_count > 0);
	--env->executing_count;
	if (env->executing_count == 0) {
		atomic_store64(&env->executing_thread, 0);
		semaphore_post(&env->execution_right);
	}
}
//Read characters from the stream until the delimiter (which is consumed but
//not stored) or end of stream. Returns an allocated, zero-terminated string
//owned by the caller, or a null string if the stream is not readable. The
//output buffer is allocated lazily so an immediate delimiter/EOS returns {0,0}.
string_t stream_read_line(stream_t* stream, char delimiter)
{
	char buffer[128];
	char* outbuffer = 0;
	size_t outsize = 0;
	size_t cursize = 0;
	size_t read, i;
	size_t want_read = 128;
	if (!(stream->mode & STREAM_IN))
		return (string_t) { 0, 0 };
	//Need to read one byte at a time since we can't scan back if overreading
	if (stream_is_sequential(stream))
		want_read = 1;
	while (!stream_eos(stream)) {
		read = stream->vtable->read(stream, buffer, want_read);
		if (!read)
			break;
		//Scan the chunk for the delimiter; i ends at its position or at read
		for (i = 0; i < read; ++i) {
			if (buffer[i] == delimiter)
				break;
		}
		if (cursize + i > outsize) {
			size_t nextsize;
			if (!outbuffer) {
				//First allocation: exact fit for larger chunks, minimum of 32
				nextsize = (i >= 32 ? i + 1 : (i > 1 ? i + 1 : 32));
				outbuffer = memory_allocate(0, nextsize, 0, MEMORY_PERSISTENT);
			}
			else {
				nextsize = (outsize < 511 ? 512 : outsize + 513); //Always aligns to 512 multiples
				FOUNDATION_ASSERT(!(nextsize % 512));
				outbuffer = memory_reallocate(outbuffer, nextsize, 0, outsize + 1);
			}
			outsize = nextsize - 1; //Reserve one byte for the zero terminator
		}
		if (i) {
			memcpy(outbuffer + cursize, buffer, i); //lint !e613
			cursize += i;
		}
		if (i < read) {
			//Delimiter found; if we overread past it, seek back so the stream
			//is positioned just after the delimiter
			if ((i + 1) < read) {
				//Sequential should never end up here reading one byte at a time
				FOUNDATION_ASSERT(!stream_is_sequential(stream));
				stream_seek(stream, (ssize_t)(1 + i) - (ssize_t)read, STREAM_SEEK_CURRENT);
			}
			break;
		}
	}
	if (outbuffer)
		outbuffer[cursize] = 0;
	return (string_t) { outbuffer, cursize };
}
//Write num_bytes from buffer to the stream. Returns the number of bytes
//actually written, or 0 if the stream is not opened for output.
uint64_t
stream_write( stream_t* stream, const void* buffer, uint64_t num_bytes )
{
	FOUNDATION_ASSERT( stream );
	if( stream->mode & STREAM_OUT )
	{
		FOUNDATION_ASSERT( stream->vtable->write );
		return stream->vtable->write( stream, buffer, num_bytes );
	}
	return 0;
}
//Raw slot lookup without id validation. Returns the stored object pointer,
//or 0 if the slot is on the free list (low tag bit set).
void*
objectmap_raw_lookup( const objectmap_t* map, unsigned int idx )
{
	/*lint --e{613} Performance path (no ptr checks)*/
	uintptr_t ptr;
	FOUNDATION_ASSERT( map );
	FOUNDATION_ASSERT( idx < map->size );
	ptr = (uintptr_t)map->map[idx];
	if( ptr & 1 )
		return 0;
	return (void*)ptr;
}
//Current position of a socket stream is the total number of bytes read
//from the underlying socket so far.
static size_t
_socket_stream_tell(stream_t* stream) {
	socket_t* sock;
	FOUNDATION_ASSERT(stream);
	FOUNDATION_ASSERT(stream->type == STREAMTYPE_SOCKET);
	sock = ((socket_stream_t*)stream)->socket;
	return sock->bytes_read;
}
//Read characters from the stream until the delimiter (which is consumed but
//not stored) or end of stream. Returns a newly allocated zero-terminated
//string owned by the caller (release with memory_deallocate), or 0 if the
//stream is not opened for input.
char* stream_read_line( stream_t* stream, char delimiter )
{
	char buffer[128];
	char* outbuffer;
	int outsize = 32;
	int cursize = 0;
	int read, i;
	int want_read = 128;
	FOUNDATION_ASSERT( stream );
	if( !( stream->mode & STREAM_IN ) )
		return 0;
	FOUNDATION_ASSERT( stream->vtable->read );
	if( stream_is_sequential( stream ) ) //Need to read one byte at a time since we can't scan back if overreading
		want_read = 1;
	outbuffer = memory_allocate( outsize + 1, 0, MEMORY_PERSISTENT );
	while( !stream_eos( stream ) )
	{
		read = (int)stream->vtable->read( stream, buffer, want_read );
		if( !read )
			break;
		//Scan the chunk for the delimiter; i ends at its position or at read
		for( i = 0; i < read; ++i )
		{
			if( buffer[i] == delimiter )
				break;
		}
		if( cursize + i > outsize )
		{
			//Grow in 512-byte steps (chunks are at most 128 bytes, so one
			//step always suffices); +1 reserves space for the terminator
			outsize += 512;
			outbuffer = memory_reallocate( outbuffer, outsize + 1, 0, cursize );
		}
		memcpy( outbuffer + cursize, buffer, i );
		cursize += i;
		if( i < read )
		{
			//Delimiter found; if we overread past it, seek back so the stream
			//is positioned just after the delimiter
			if( ( i + 1 ) < read )
			{
				FOUNDATION_ASSERT( !stream_is_sequential( stream ) ); //Sequential should never end up here reading one byte at a time
				stream_seek( stream, 1 + i - read, STREAM_SEEK_CURRENT );
			}
			break;
		}
	}
	outbuffer[cursize] = 0;
	return outbuffer;
}
//Store an object pointer in the slot addressed by a handle previously
//returned from objectmap_reserve. The slot must be reserved (cleared to 0)
//and not on the free list.
void
objectmap_set( objectmap_t* map, object_t id, void* object )
{
	uint64_t slot;
	FOUNDATION_ASSERT( map );
	/*lint -esym(613,pool) */
	slot = (int)( id & map->mask_index );
	//A free slot (low tag bit set) or an already populated slot means the
	//handle did not come from objectmap_reserve
	FOUNDATION_ASSERT( ( ( (uintptr_t)map->map[slot] ) & 1 ) == 0 );
	FOUNDATION_ASSERT( ( (uintptr_t)map->map[slot] ) == 0 );
	if( map->map[slot] == 0 )
		map->map[slot] = object;
	/*lint +esym(613,pool) */
}
//Close the calling thread's current profile block, stamping its end time.
//If the block has a parent, the thread's current block is restored to that
//parent; if the thread migrated to another processor while the block was
//open, the parent is closed and reopened as a new block on the new
//processor. Root blocks (no parent) are published to the reporting chain.
void profile_end_block( void )
{
	uint32_t block_index = get_thread_profile_block();
	profile_block_t* block;
	if( !_profile_enable || !block_index )
		return;
	block = GET_BLOCK( block_index );
	block->data.end = time_current() - _profile_ground_time;
	if( block->previous )
	{
		unsigned int processor;
		profile_block_t* current = block;
		profile_block_t* previous = GET_BLOCK( block->previous );
		profile_block_t* parent;
		unsigned int current_index = block_index;
		unsigned int parent_index;
		//The parent's child link points at the first sibling; walk the
		//sibling list backwards until we find that link, at which point
		//current->previous is the parent rather than another sibling
		while( previous->child != current_index )
		{
			current_index = current->previous; //Walk sibling list backwards
			current = GET_BLOCK( current_index );
			previous = GET_BLOCK( current->previous );
#if PROFILE_ENABLE_SANITY_CHECKS
			FOUNDATION_ASSERT( current_index != 0 );
			FOUNDATION_ASSERT( current->previous != 0 );
#endif
		}
		parent_index = current->previous; //Previous now points to parent
		parent = GET_BLOCK( parent_index );
#if PROFILE_ENABLE_SANITY_CHECKS
		FOUNDATION_ASSERT( parent_index != block_index );
#endif
		set_thread_profile_block( parent_index );
		processor = thread_hardware();
		if( parent->data.processor != processor )
		{
			const char* message = parent->data.name; //Thread migrated, split into new block
			profile_end_block();
			profile_begin_block( message );
		}
	}
	else
	{
		_profile_put_root_block( block_index );
		set_thread_profile_block( 0 );
	}
}
//Read a line (up to delimiter or end of stream) into the caller-provided
//buffer dest of capacity count, always zero-terminating the result. The
//delimiter is consumed but not stored. Returns the number of bytes stored,
//excluding the terminator; 0 if the stream is not readable or count < 2.
uint64_t stream_read_line_buffer( stream_t* stream, char* dest, unsigned int count, char delimiter )
{
	int i, read, total, limit;
	FOUNDATION_ASSERT( stream );
	FOUNDATION_ASSERT( dest );
	if( !( stream->mode & STREAM_IN ) || ( count < 2 ) )
		return 0;
	FOUNDATION_ASSERT( stream->vtable->read );
	total = 0;
	--count; //Reserve one byte for the zero terminator
	while( !stream_eos( stream ) )
	{
		limit = count - total;
		if( limit > 128 )
			limit = 128;
		if( !limit )
			break; //Destination buffer full
		if( stream_is_sequential( stream ) ) //Need to read one byte at a time since we can't scan back if overreading
			limit = 1;
		read = (int)stream->vtable->read( stream, dest + total, limit );
		if( !read )
			break;
		//Scan the newly read chunk for the delimiter
		for( i = 0; i < read; ++i )
		{
			if( dest[total+i] == delimiter )
				break;
		}
		total += i;
		if( i < read )
		{
			//Delimiter found; if we overread past it, seek back so the
			//stream is positioned just after the delimiter
			if( ( i + 1 ) < read )
			{
				FOUNDATION_ASSERT( !stream_is_sequential( stream ) ); //Sequential should never end up here reading one byte at a time
				stream_seek( stream, 1 + i - read, STREAM_SEEK_CURRENT );
			}
			break;
		}
	}
	dest[total] = 0;
	return total;
}
//Create and compile a GL shader of the given type from the supplied source.
//Compile errors are fetched and logged; the shader handle is stored in the
//returned shader_t regardless of compile success. Caller owns the returned
//allocation.
shader_t* shader_create(const char* data, const int64_t dataSize, ShaderType type)
{
	GLuint shaderHandle = glCreateShader(type == VertexShader ? GL_VERTEX_SHADER : GL_FRAGMENT_SHADER);
	mint_CHECKFORGLERROR;
	{
		//Shader source should never exceed 4Gb anyway, riiight?
		FOUNDATION_ASSERT(dataSize < 0xffffffff);
		int32_t shaderSourceSize = (int32_t) dataSize;
		glShaderSource(shaderHandle, 1, &data, &shaderSourceSize);
		mint_CHECKFORGLERROR;
		glCompileShader(shaderHandle);
		mint_CHECKFORGLERROR;
		int32_t logLength = 0, success;
		glGetShaderiv(shaderHandle, GL_COMPILE_STATUS, &success);
		glGetShaderiv(shaderHandle, GL_INFO_LOG_LENGTH, &logLength);
		if (success != GL_TRUE && logLength > 1)
		{
			//Fetch and log the compiler output (GL reports logLength
			//including the zero terminator)
			int32_t charsWritten;
			GLchar* log = (GLchar*) memory_allocate(sizeof(GLchar) * (logLength + 1), 4, MEMORY_TEMPORARY);
			glGetShaderInfoLog(shaderHandle, logLength + 1, &charsWritten, log);
			log_errorf(ERROR_NONE, "Compiler results: %s", (char*) log);
			memory_deallocate(log);
		}
	}
	shader_t* shader = (shader_t*) memory_allocate(sizeof(shader_t), 4, MEMORY_PERSISTENT);
	shader->buffer = shaderHandle;
	return shader;
}
//Number of bytes immediately available for reading. Uses the stream's own
//implementation when provided, otherwise falls back to size minus position.
unsigned int
stream_available_read( stream_t* stream )
{
	FOUNDATION_ASSERT( stream );
	if( !stream->vtable->available_read )
		return (unsigned int)( stream_size( stream ) - stream_tell( stream ) );
	return (unsigned int)stream->vtable->available_read( stream );
}
//Heuristically set or clear STREAM_BINARY on the stream by peeking at the
//next num bytes (default 8) and checking for non-text characters. The
//stream position is restored afterwards. No-op for write-only or
//sequential (non-seekable) streams.
void stream_determine_binary_mode( stream_t* stream, unsigned int num )
{
	char* buf;
	int64_t cur;
	uint64_t actual_read, i;
	FOUNDATION_ASSERT( stream );
	if( !( stream->mode & STREAM_IN ) || stream_is_sequential( stream ) )
		return;
	if( !num )
		num = 8;
	buf = memory_allocate( num, 0, MEMORY_TEMPORARY );
	memset( buf, 32, num ); //Prefill with spaces in case the read comes up short
	cur = stream_tell( stream );
	actual_read = stream_read( stream, buf, num );
	stream_seek( stream, cur, STREAM_SEEK_BEGIN ); //Restore position after peek
	stream->mode &= ~STREAM_BINARY;
	for( i = 0; i < actual_read; ++i )
	{
		//Binary if any byte is a control character other than tab/LF/CR, or
		//outside printable ASCII. NOTE(review): with signed char, bytes >= 0x80
		//are negative and caught by the < 0x20 test instead of > 0x7e - either
		//way multi-byte UTF-8 text is classified as binary; confirm intended.
		//TODO: What about UTF-8?
		if( ( ( buf[i] < 0x20 ) && ( buf[i] != 0x09 ) && ( buf[i] != 0x0a ) && ( buf[i] != 0x0d ) ) || ( buf[i] > 0x7e ) )
		{
			stream->mode |= STREAM_BINARY;
			break;
		}
	}
	memory_deallocate( buf );
}
//Write num bytes from source to the pipe, looping until everything is
//written or the pipe fails. Returns the number of bytes actually written
//(0 if the pipe has no write end or is not opened for output).
static uint64_t _pipe_stream_write( stream_t* stream, const void* source, uint64_t num )
{
	stream_pipe_t* pipestream = (stream_pipe_t*)stream;
	FOUNDATION_ASSERT( stream->type == STREAMTYPE_PIPE );
#if FOUNDATION_PLATFORM_WINDOWS
	if( pipestream->handle_write && ( ( pipestream->mode & STREAM_OUT ) != 0 ) )
	{
		uint64_t total_written = 0;
		do
		{
			unsigned long num_written = 0;
			if( !WriteFile( pipestream->handle_write, pointer_offset_const( source, total_written ), (unsigned int)( num - total_written ), &num_written, 0 ) )
				break;
			if( !num_written )
				break; //BUGFIX: a zero-byte success previously made no progress and spun forever
			total_written += num_written;
		} while( total_written < num );
		return total_written;
	}
#elif FOUNDATION_PLATFORM_POSIX
	if( pipestream->fd_write && ( ( pipestream->mode & STREAM_OUT ) != 0 ) )
	{
		uint64_t total_written = 0;
		do
		{
			ssize_t num_written = write( pipestream->fd_write, pointer_offset_const( source, total_written ), (size_t)( num - total_written ) );
			if( num_written <= 0 )
				break; //BUGFIX: was < 0 only; a zero-byte write made no progress and spun forever
			total_written += num_written;
		} while( total_written < num );
		return total_written;
	}
#endif
	return 0;
}
//Acquire the mutex, blocking until available. Recursive locking by the
//owning thread is supported; lockcount tracks the nesting depth. Returns
//false only if the underlying OS lock operation fails (POSIX path).
bool mutex_lock( mutex_t* mutex )
{
	FOUNDATION_ASSERT( mutex );
#if !BUILD_DEPLOY
	profile_trylock( mutex->name );
#endif
#if FOUNDATION_PLATFORM_WINDOWS
	EnterCriticalSection( (CRITICAL_SECTION*)mutex->csection );
#elif FOUNDATION_PLATFORM_POSIX
	if( pthread_mutex_lock( &mutex->mutex ) != 0 )
	{
		FOUNDATION_ASSERT_FAILFORMAT( "unable to lock mutex %s", mutex->name );
		return false;
	}
#else
#  error mutex_lock not implemented
#endif
#if !BUILD_DEPLOY
	profile_lock( mutex->name );
#endif
	//The OS lock is held here, so updating lockcount/lockedthread is safe
	FOUNDATION_ASSERT_MSGFORMAT( !mutex->lockcount || ( thread_id() == mutex->lockedthread ), "Mutex lock acquired with lockcount > 0 (%d) and locked thread not self (%llx != %llx)", mutex->lockcount, mutex->lockedthread, thread_id() );
	if( !mutex->lockcount )
		mutex->lockedthread = thread_id();
	++mutex->lockcount;
	return true;
}
//Signal the mutex event, waking threads blocked waiting on it.
void mutex_signal( mutex_t* mutex )
{
	FOUNDATION_ASSERT( mutex );
#if !BUILD_DEPLOY
	profile_signal( mutex->name );
#endif
#if FOUNDATION_PLATFORM_WINDOWS
	SetEvent( mutex->event );
#elif FOUNDATION_PLATFORM_POSIX
	//Lock so setting the pending flag and broadcasting are atomic with
	//respect to waiters checking the condition
	mutex_lock( mutex );
	mutex->pending = true;
	int ret = pthread_cond_broadcast( &mutex->cond );
	if( ret != 0 )
		log_warnf( 0, WARNING_SYSTEM_CALL_FAIL, "Unable to signal mutex '%s': %s (%d)", mutex->name, system_error_message( ret ), ret );
	mutex_unlock( mutex );
#else
#  error mutex_signal not implemented
#endif
}
//Clone the stream if the stream type supports it; returns 0 for
//non-clonable streams (e.g. sockets, pipes).
stream_t*
stream_clone( stream_t* stream )
{
	FOUNDATION_ASSERT( stream );
	return stream->vtable->clone ? stream->vtable->clone( stream ) : 0;
}
//Read num bytes from the pipe into dest, looping until everything is read,
//end-of-pipe, or a read failure. Returns the number of bytes actually read
//(0 if the pipe has no read end or is not opened for input).
static uint64_t _pipe_stream_read( stream_t* stream, void* dest, uint64_t num )
{
	stream_pipe_t* pipestream = (stream_pipe_t*)stream;
	FOUNDATION_ASSERT( stream->type == STREAMTYPE_PIPE );
#if FOUNDATION_PLATFORM_WINDOWS
	if( pipestream->handle_read && ( ( pipestream->mode & STREAM_IN ) != 0 ) )
	{
		uint64_t total_read = 0;
		do
		{
			unsigned long num_read = 0;
			if( !ReadFile( pipestream->handle_read, pointer_offset( dest, total_read ), (unsigned int)( num - total_read ), &num_read, 0 ) )
				break;
			if( !num_read )
				break; //BUGFIX: zero-byte success (EOF) previously made no progress and spun forever
			total_read += num_read;
		} while( total_read < num );
		return total_read;
	}
#elif FOUNDATION_PLATFORM_POSIX
	if( pipestream->fd_read && ( ( pipestream->mode & STREAM_IN ) != 0 ) )
	{
		uint64_t total_read = 0;
		do
		{
			ssize_t num_read = read( pipestream->fd_read, pointer_offset( dest, total_read ), (size_t)( num - total_read ) );
			if( num_read <= 0 )
				break; //BUGFIX: was < 0 only; read() returns 0 at EOF, which previously looped forever
			total_read += num_read;
		} while( total_read < num );
		return total_read;
	}
#endif
	return 0;
}
//Publish a finished root block to the global _profile_root slot, lock-free.
//If another thread already published a root, that chain is stolen (CAS the
//slot back to 0), spliced onto this block's sibling list, and the publish
//is retried until it succeeds.
static void _profile_put_root_block( uint32_t block )
{
	uint32_t sibling;
	profile_block_t* self = GET_BLOCK( block );
#if PROFILE_ENABLE_SANITY_CHECKS
	FOUNDATION_ASSERT( self->sibling == 0 );
#endif
	while( !atomic_cas32( &_profile_root, block, 0 ) )
	{
		//Root slot occupied; grab the current chain for ourselves
		do
		{
			sibling = atomic_load32( &_profile_root );
		} while( sibling && !atomic_cas32( &_profile_root, 0, sibling ) );
		if( sibling )
		{
			if( self->sibling )
			{
				//Append the stolen chain at the tail of our sibling list
				uint32_t leaf = self->sibling;
				while( GET_BLOCK( leaf )->sibling )
					leaf = GET_BLOCK( leaf )->sibling;
				GET_BLOCK( sibling )->previous = leaf;
				GET_BLOCK( leaf )->sibling = sibling;
			}
			else
			{
				self->sibling = sibling;
			}
		}
	}
}
//Look up (and optionally create) the key entry for (section, key). Returns
//a pointer into the section's hash bucket array, or 0 if not found and
//create is false. NOTE: the returned pointer is invalidated by subsequent
//inserts into the same bucket (the bucket array may be reallocated).
static NOINLINE config_key_t* config_key( hash_t section, hash_t key, bool create )
{
	config_key_t new_key = {0};
	config_section_t* csection;
	config_key_t* bucket;
	int ib, bsize;
	csection = config_section( section, create );
	if( !csection )
	{
		//Section lookup only fails when we were not asked to create it
		FOUNDATION_ASSERT( !create );
		return 0;
	}
	bucket = csection->key[ key % CONFIG_KEY_BUCKETS ];
	for( ib = 0, bsize = array_size( bucket ); ib < bsize; ++ib )
	{
		/*lint --e{613} array_size( bucket ) in loop condition does the null pointer guard */
		if( bucket[ib].name == key )
			return bucket + ib;
	}
	if( !create )
		return 0;
	new_key.name = key;
	//TODO: Thread safeness
	array_push_memcpy( bucket, &new_key );
	//Write the bucket pointer back since the push may have reallocated it
	csection->key[ key % CONFIG_KEY_BUCKETS ] = bucket;
	return bucket + bsize;
}
bool mutex_unlock(mutex_t* mutex) { if (!mutex->lockcount) { log_warnf(0, WARNING_SUSPICIOUS, STRING_CONST("Unable to unlock unlocked mutex %.*s"), (int)mutex->name.length, mutex->name.str); return false; } FOUNDATION_ASSERT(mutex->lockedthread == thread_id()); --mutex->lockcount; #if !BUILD_DEPLOY profile_unlock(mutex->name.str, mutex->name.length); #endif #if FOUNDATION_PLATFORM_WINDOWS LeaveCriticalSection((CRITICAL_SECTION*)mutex->csection); #elif FOUNDATION_PLATFORM_POSIX || FOUNDATION_PLATFORM_PNACL if (pthread_mutex_unlock(&mutex->mutex) != 0) { FOUNDATION_ASSERT_FAILFORMAT("unable to unlock mutex %s", mutex->name.str); return false; } #else # error mutex_unlock not implemented #endif return true; }
bool mutex_try_lock(mutex_t* mutex) { bool was_locked; #if !BUILD_DEPLOY profile_trylock(mutex->name.str, mutex->name.length); #endif #if FOUNDATION_PLATFORM_WINDOWS was_locked = TryEnterCriticalSection((CRITICAL_SECTION*)mutex->csection); #elif FOUNDATION_PLATFORM_POSIX || FOUNDATION_PLATFORM_PNACL was_locked = (pthread_mutex_trylock(&mutex->mutex) == 0); #else # error mutex_try_lock not implemented was_locked = false; #endif #if !BUILD_DEPLOY if (was_locked) profile_lock(mutex->name.str, mutex->name.length); #endif if (was_locked) { FOUNDATION_ASSERT(!mutex->lockcount || (thread_id() == mutex->lockedthread)); if (!mutex->lockcount) mutex->lockedthread = thread_id(); ++mutex->lockcount; } return was_locked; }
//Set or clear the STREAM_BINARY flag on the stream mode bitfield.
void
stream_set_binary( stream_t* stream, bool binary )
{
	FOUNDATION_ASSERT( stream );
	stream->mode = binary ? ( stream->mode | STREAM_BINARY ) : ( stream->mode & ~STREAM_BINARY );
}
//Rewind both read and write cursors and clear the accumulated byte counters.
void
ringbuffer_reset( ringbuffer_t* buffer )
{
	FOUNDATION_ASSERT( buffer );
	buffer->total_read = buffer->total_write = 0;
	buffer->offset_read = buffer->offset_write = 0;
}