void _binpachilti_sink_connect_intern(binpac_sink* sink, const hlt_type_info* type, void** pobj,
                                      binpac_parser* parser, hlt_bytes* mtype,
                                      hlt_exception** excpt, hlt_execution_context* ctx)
{
    __parser_state* state = hlt_malloc(sizeof(__parser_state));
    state->parser = parser;
    GC_CCTOR(state->parser, hlt_BinPACHilti_Parser, ctx);
    state->pobj = *pobj;
    GC_CCTOR_GENERIC(&state->pobj, type, ctx);
    state->data = 0;
    state->resume = 0;
    state->disconnected = 0;
    state->next = sink->head;

    sink->head = state;

#ifdef DEBUG
    if ( mtype ) {
        hlt_string s = hlt_string_decode(mtype, Hilti_Charset_ASCII, excpt, ctx);
        char* r1 = hlt_string_to_native(s, excpt, ctx);
        char* r2 = hlt_string_to_native(parser->name, excpt, ctx);
        DBG_LOG("binpac-sinks", "connected parser %s [%p] to sink %p for MIME type %s", r2, *pobj,
                sink, r1);
        hlt_free(r1);
        hlt_free(r2);
    }

    else {
        char* p = hlt_string_to_native(parser->name, excpt, ctx);
        DBG_LOG("binpac-sinks", "connected parser %s [%p] to sink %p", p, *pobj, sink);
        hlt_free(p);
    }
#endif
}
void hlt_thread_queue_delete(hlt_thread_queue* queue)
{
    if ( PTHREAD_SPIN_DESTROY(&queue->lock) != 0 )
        _fatal_error("cannot destroy lock");

    for ( int w = 0; w < queue->writers; w++ ) {
        batch* b = queue->writer_batches[w];
        while ( b ) {
            batch* next = b->next;
            hlt_free(b);
            b = next;
        }
    }

    batch* b = queue->reader_head;
    while ( b ) {
        batch* next = b->next;
        hlt_free(b);
        b = next;
    }

    b = queue->lock_pending_head;
    while ( b ) {
        batch* next = b->next;
        hlt_free(b);
        b = next;
    }

    hlt_free(queue->reader_stats);
    hlt_free(queue->writer_batches);
    hlt_free(queue->writer_num_written);
    hlt_free(queue->writer_stats);
    hlt_free(queue->lock_writers_terminated);
    hlt_free(queue);
}
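// Illustrative sketch only (assumption; the real definition lives in the
// threading code): the teardown loops above and the reader in
// hlt_thread_queue_read() treat a batch as a singly linked block of queued
// elements, roughly along these lines. The capacity constant is made up for
// illustration.
#if 0
typedef struct batch {
    struct batch* next; // Next batch in the per-writer or reader chain.
    int write_pos;      // Number of elements the writer has filled in.
    void* elems[128];   // Queued elements; capacity is illustrative.
} batch;
#endif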
void hlt_free_list_delete(hlt_free_list* list)
{
    __hlt_free_list_block* b = list->pool;

    while ( b ) {
        __hlt_free_list_block* tmp = b->next;
        hlt_free(b);
        b = tmp;
    }

    hlt_free(list);
}
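// Sketch (assumption, not the actual definition): the loop above walks the
// pool as a singly linked chain of recycled blocks, so the block type is
// expected to look roughly like this, with the reusable payload stored after
// the link.
#if 0
struct __hlt_free_list_block {
    struct __hlt_free_list_block* next; // Next block available for reuse.
    char data[];                        // Start of the recycled payload.
};
#endif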
void hlt_execution_context_delete(hlt_execution_context* ctx)
{
    hlt_exception* excpt = 0;

    // Do this first; it may still need the context.
    hlt_timer_mgr_expire(ctx->tmgr, 0, &excpt, ctx);
    GC_DTOR(ctx->tmgr, hlt_timer_mgr, ctx);

    __hlt_globals_dtor(ctx);

    GC_DTOR(ctx->excpt, hlt_exception, ctx);

    if ( ctx->fiber )
        hlt_fiber_delete(ctx->fiber, ctx);

    if ( ctx->pstate )
        __hlt_profiler_state_delete(ctx->pstate);

    if ( ctx->tcontext ) {
        GC_DTOR_GENERIC(&ctx->tcontext, ctx->tcontext_type, ctx);
    }

    __hlt_fiber_pool_delete(ctx->fiber_pool);

    if ( ctx->nullbuffer )
        __hlt_memory_nullbuffer_delete(ctx->nullbuffer, ctx);

    hlt_free(ctx);
}
void binpachilti_sink_disconnect(binpac_sink* sink, const hlt_type_info* type, void** pobj,
                                 hlt_exception** excpt, hlt_execution_context* ctx)
{
    if ( ! sink )
        return;

    __parser_state* s = 0;

    for ( s = sink->head; s; s = s->next ) {
        if ( s->pobj == *pobj )
            break;
    }

    if ( ! s )
        // Not found, ignore.
        return;

#ifdef DEBUG
    char* p = hlt_string_to_native(s->parser->name, excpt, ctx);
    DBG_LOG("binpac-sinks", "disconnected parser %s [%p] from sink %p", p, *pobj, sink);
    hlt_free(p);
#endif

    // We don't delete the object here, as we may still be deep inside the
    // parsing when the disconnect is requested. We only flag the state and
    // leave the actual unlinking for later.
    s->disconnected = 1;
}
// Internal version that really deletes a fiber (vs. the external version
// that might put the fiber back into a pool to recycle later).
static void __hlt_fiber_delete(hlt_fiber* fiber)
{
    assert(fiber->state != RUNNING);

    hlt_stack_free(fiber->uctx.uc_stack.ss_sp, fiber->uctx.uc_stack.ss_size);
    hlt_free(fiber);
}
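// Sketch (assumption): the public hlt_fiber_delete() mentioned above is
// expected to prefer recycling a finished fiber into the context's fiber pool
// and to fall back to the real deletion otherwise, roughly like this; the
// actual implementation may reset more state and cap the pool size.
#if 0
void hlt_fiber_delete(hlt_fiber* fiber, hlt_execution_context* ctx)
{
    if ( ctx && ctx->fiber_pool ) {
        fiber->next = ctx->fiber_pool->head; // Push onto the pool's free list.
        ctx->fiber_pool->head = fiber;
        return;
    }

    __hlt_fiber_delete(fiber);
}
#endif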
static void __unlink_state(binpac_sink* sink, __parser_state* state, hlt_execution_context* ctx)
{
    __parser_state* prev = 0;
    __parser_state* s = sink->head;

    while ( s ) {
        if ( s == state )
            break;

        prev = s;
        s = s->next;
    }

    assert(s);

    if ( prev )
        prev->next = state->next;
    else
        sink->head = state->next;

    GC_CLEAR(state->data, hlt_bytes, ctx);
    GC_CLEAR(state->resume, hlt_exception, ctx);
    GC_DTOR_GENERIC(&state->pobj, state->parser->type_info, ctx);
    state->pobj = 0;
    GC_CLEAR(state->parser, hlt_BinPACHilti_Parser, ctx);
    hlt_free(state);
}
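// Sketch (assumption, not code from the sink implementation): states flagged
// as disconnected in binpachilti_sink_disconnect() are expected to be swept
// later via __unlink_state(), at a point where no parser is running on the
// sink, roughly like this:
#if 0
for ( __parser_state* s = sink->head; s; ) {
    __parser_state* next = s->next;

    if ( s->disconnected )
        __unlink_state(sink, s, ctx);

    s = next;
}
#endif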
void hlt_classifier_dtor(hlt_type_info* ti, hlt_classifier* c, hlt_execution_context* ctx)
{
    if ( ! c->rules )
        return;

    for ( int i = 0; i < c->num_rules; i++ ) {
        hlt_classifier_rule* r = c->rules[i];

        for ( int j = 0; j < c->num_fields; j++ )
            hlt_free(r->fields[j]);

        hlt_free(r->fields);

        GC_DTOR_GENERIC(r->value, c->value_type, ctx);
        hlt_free(r->value);
        hlt_free(r);
    }

    hlt_free(c->rules);
}
void __hlt_fiber_pool_delete(__hlt_fiber_pool* pool)
{
    while ( pool->head ) {
        hlt_fiber* fiber = pool->head;
        pool->head = pool->head->next;
        __hlt_fiber_delete(fiber);
    }

    hlt_free(pool);
}
void hlt_memory_pool_dtor(hlt_memory_pool* p)
{
    __hlt_memory_pool_block* b;
    __hlt_memory_pool_block* n;

    // Don't delete 'first'; it's embedded in the pool itself.
    for ( b = p->first.next; b; b = n ) {
        n = b->next;
        hlt_free(b);
    }
}
void hlt_regexp_dtor(hlt_type_info* ti, hlt_regexp* re, hlt_execution_context* ctx)
{
    for ( int i = 0; i < re->num; i++ )
        GC_DTOR(re->patterns[i], hlt_string, ctx);

    hlt_free(re->patterns);

    if ( re->num > 0 )
        jrx_regfree(&re->regexp);
}
void hlt_exception_dtor(hlt_type_info* ti, hlt_exception* excpt, hlt_execution_context* ctx)
{
    if ( excpt->arg ) {
        GC_DTOR_GENERIC(excpt->arg, excpt->type->argtype, ctx);
        hlt_free(excpt->arg);
    }

    if ( excpt->fiber )
        hlt_fiber_delete(excpt->fiber, 0);
}
static void __add_parser(hlt_bytes* mt, binpac_parser* parser, hlt_exception** excpt,
                         hlt_execution_context* ctx)
{
    __mime_parser* mp = GC_NEW_REF(__mime_parser, ctx);
    GC_INIT(mp->parser, parser, hlt_BinPACHilti_Parser, ctx);
    mp->next = 0;

    // Deep-copy the pointers; the map needs that.
    __mime_parser** cmp = hlt_malloc(sizeof(__mime_parser*));
    hlt_bytes** cmt = hlt_malloc(sizeof(hlt_bytes*));
    *cmt = mt;
    *cmp = mp;

    __mime_parser** current =
        hlt_map_get_default(__binpac_globals_get()->mime_types, &hlt_type_info_hlt_bytes, cmt,
                            &hlt_type_info___mime_parser, 0, excpt, ctx);

    if ( current ) {
        mp->next = *current;
        GC_CCTOR(mp->next, __mime_parser, ctx);
    }

    hlt_map_insert(__binpac_globals_get()->mime_types, &hlt_type_info_hlt_bytes, cmt,
                   &hlt_type_info___mime_parser, cmp, excpt, ctx);

    GC_DTOR(mp, __mime_parser, ctx);
    hlt_free(cmp);
    hlt_free(cmt);

#ifdef DEBUG
    hlt_string s = hlt_string_decode(mt, Hilti_Charset_ASCII, excpt, ctx);
    char* r1 = hlt_string_to_native(s, excpt, ctx);
    char* r2 = hlt_string_to_native(parser->name, excpt, ctx);
    DBG_LOG("binpac-sinks", "MIME type %s registered for parser %s", r1, r2);
    hlt_free(r1);
    hlt_free(r2);
#endif
}
void __hlt_memory_nullbuffer_flush(__hlt_memory_nullbuffer* nbuf, hlt_execution_context* ctx)
{
    if ( nbuf->flush_pos >= 0 )
        return;

#ifdef DEBUG
    _dbg_mem_raw("nullbuffer_flush", nbuf, nbuf->used, 0, "start", 0, ctx);
#endif

    // Note, flush_pos is examined during flushing by nullbuffer_add().
    for ( nbuf->flush_pos = 0; nbuf->flush_pos < nbuf->used; ++nbuf->flush_pos ) {
        struct __obj_with_rtti x = nbuf->objs[nbuf->flush_pos];

        if ( ! x.obj )
            // May have been removed.
            continue;

#ifdef DEBUG
        --__hlt_globals()->num_nullbuffer;
#endif

        __hlt_gchdr* hdr = (__hlt_gchdr*)x.obj;

        if ( hdr->ref_cnt > 0 )
            // Still alive actually.
            continue;

        if ( x.ti->obj_dtor )
            (*(x.ti->obj_dtor))(x.ti, x.obj, ctx);

        __hlt_free(x.obj, x.ti->tag, "nullbuffer_flush");
    }

    nbuf->used = 0;

    if ( nbuf->allocated > __INITIAL_NULLBUFFER_SIZE ) {
        hlt_free(nbuf->objs);
        nbuf->allocated = __INITIAL_NULLBUFFER_SIZE;
        nbuf->objs =
            (struct __obj_with_rtti*)hlt_malloc(sizeof(struct __obj_with_rtti) * nbuf->allocated);
    }

#ifdef DEBUG
    _dbg_mem_raw("nullbuffer_flush", nbuf, nbuf->used, 0, "end", 0, ctx);
#endif

    nbuf->flush_pos = -1;
}
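// The flush above is guarded against re-entry: flush_pos is -1 while idle and
// holds the current scan index while a flush is running, so a nested call
// (e.g., one triggered from an object's destructor) returns immediately.
// Minimal stand-alone sketch of the same sentinel pattern; all names here are
// illustrative, not part of the runtime.
#if 0
static int flush_pos = -1;

static void flush(void)
{
    if ( flush_pos >= 0 )
        return; // Already flushing; the nested call becomes a no-op.

    for ( flush_pos = 0; flush_pos < used; ++flush_pos )
        release(objs[flush_pos]); // May recursively end up in flush() again.

    flush_pos = -1;
}
#endif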
static void __exception_print(const char* prefix, hlt_exception* e, hlt_execution_context* ctx)
{
    hlt_exception* excpt = 0;

    // We must not terminate while in here.
    int old_state;
    hlt_pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old_state);

    flockfile(stderr);

    hlt_string s = __exception_render(e, ctx);
    char* c = hlt_string_to_native(s, &excpt, ctx);
    fprintf(stderr, "%s%s\n", prefix, c);
    hlt_free(c);

    fflush(stderr);
    funlockfile(stderr);

    hlt_pthread_setcancelstate(old_state, NULL);
}
void __hlt_memory_nullbuffer_delete(__hlt_memory_nullbuffer* nbuf, hlt_execution_context* ctx)
{
    __hlt_memory_nullbuffer_flush(nbuf, ctx);
    hlt_free(nbuf->objs);
    hlt_free(nbuf);
}
static void __delete_chunk(__chunk* c, hlt_exception** excpt, hlt_execution_context* ctx)
{
    GC_DTOR(c->data, hlt_bytes, ctx);
    hlt_free(c);
}
void* hlt_thread_queue_read(hlt_thread_queue* queue, int timeout)
{
    int block = (timeout == 0);

    timeout *= 1000; // Turn it into nanoseconds.

    while ( 1 ) {
        while ( queue->reader_head ) {
            // We still have stuff to do, so do it.
            if ( queue->need_flush )
                queue->need_flush = 0;

            batch* b = queue->reader_head;

            if ( queue->reader_pos < b->write_pos ) {
                // Still something in our current batch.
                ++queue->reader_stats->elems;
                ++queue->reader_num_read;
                return b->elems[queue->reader_pos++];
            }

            // Switch to next batch.
            batch* next = queue->reader_head->next;
            hlt_free(queue->reader_head);
            queue->reader_head = next;
            queue->reader_pos = 0;

            ++queue->reader_stats->batches;
        }

        pthread_testcancel();

        // Nothing left anymore, get a currently pending batch.
        int s;
        _acquire_lock(queue, &s, 1, 0);

        ++queue->reader_stats->locked;

        queue->reader_head = queue->lock_pending_head;
        queue->reader_pos = 0;
        queue->lock_pending_head = queue->lock_pending_tail = 0;
        queue->lock_num_pending = 0;
        queue->lock_block = 0;

        // Take the opportunity to check who has terminated.
        queue->reader_num_terminated = 0;
        for ( int i = 0; i < queue->writers; ++i ) {
            if ( queue->lock_writers_terminated[i] )
                ++queue->reader_num_terminated;
        }

        _release_lock(queue, s, 1, 0);

        if ( ! queue->reader_head ) {
            // Nothing was pending actually ...
            ++queue->reader_stats->blocked;

            queue->need_flush = 1;

            if ( timeout <= 0 && ! block )
                return 0;

            if ( hlt_thread_queue_terminated(queue) )
                return 0;

            // Sleep a tiny bit.
            // pthread_yield();
            hlt_util_nanosleep(1000);
            timeout -= 1000;
        }
    }

    // Can't be reached.
    assert(0);
}
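// Hypothetical consumer loop (sketch only): with timeout == 0 the read blocks,
// so it returns null only once all writers have terminated and the queue has
// drained. consume() is a placeholder, not part of the library.
#if 0
void* elem;

while ( (elem = hlt_thread_queue_read(queue, 0)) )
    consume(elem);
#endif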