/* Smoke test: run two build/collect cycles so the second sweep must
 * reclaim the garbage produced by the first build. */
int test_gc() {
    for (int pass = 0; pass < 2; pass++) {
        build_hash();
        gc_sweep(gc);
    }
    return 0;
}
/* Run a full stop-the-world mark/sweep collection.
 * Resets the allocation counter, then (if collection is enabled) marks all
 * reachable objects, sweeps weak references and dead objects, and runs any
 * pending finalizers.  Timing/profiling output is compiled in via the
 * GCTIME, MEMPROFILE and OBJPROFILE build flags. */
void jl_gc_collect(void)
{
    allocd_bytes = 0;
    if (is_gc_enabled) {
        /* defer signal delivery so the collector is not interrupted */
        JL_SIGATOMIC_BEGIN();
#ifdef GCTIME
        double t0 = clock_now();
#endif
        gc_mark();
#ifdef GCTIME
        ios_printf(ios_stderr, "mark time %.3f ms\n", (clock_now()-t0)*1000);
#endif
#if defined(MEMPROFILE)
        all_pool_stats();
        big_obj_stats();
#endif
#ifdef GCTIME
        t0 = clock_now();
#endif
        /* weak refs must be cleared before the sweep frees their targets */
        sweep_weak_refs();
        gc_sweep();
#ifdef GCTIME
        ios_printf(ios_stderr, "sweep time %.3f ms\n", (clock_now()-t0)*1000);
#endif
        run_finalizers();
        JL_SIGATOMIC_END();
#ifdef OBJPROFILE
        print_obj_profile();
        htable_reset(&obj_counts, 0);
#endif
    }
}
/* Full stop-the-world collection with adaptive pacing.
 * Marks, sweeps, runs finalizers, then retunes `collect_interval` based on
 * how much memory the sweep actually freed relative to what was allocated
 * since the previous collection.  May recurse once if a large number of
 * objects were finalized, to reclaim storage those finalizers released. */
void jl_gc_collect(void)
{
    size_t actual_allocd = allocd_bytes;   /* bytes allocated since last GC */
    total_allocd_bytes += allocd_bytes;
    allocd_bytes = 0;
    if (is_gc_enabled) {
        /* defer signal delivery; flag re-entrancy for the runtime */
        JL_SIGATOMIC_BEGIN();
        jl_in_gc = 1;
#if defined(GCTIME) || defined(GC_FINAL_STATS)
        double t0 = clock_now();
#endif
        gc_mark();
#ifdef GCTIME
        JL_PRINTF(JL_STDERR, "mark time %.3f ms\n", (clock_now()-t0)*1000);
#endif
#if defined(MEMPROFILE)
        all_pool_stats();
        big_obj_stats();
#endif
#ifdef GCTIME
        t0 = clock_now();
#endif
        /* weak refs must be cleared before their targets are freed */
        sweep_weak_refs();
        gc_sweep();
#ifdef GCTIME
        JL_PRINTF(JL_STDERR, "sweep time %.3f ms\n", (clock_now()-t0)*1000);
#endif
        /* snapshot the finalizer-queue length before draining it */
        int nfinal = to_finalize.len;
        run_finalizers();
        jl_in_gc = 0;
        JL_SIGATOMIC_END();
#if defined(GC_FINAL_STATS)
        total_gc_time += (clock_now()-t0);
        total_freed_bytes += freed_bytes;
#endif
#ifdef OBJPROFILE
        print_obj_profile();
        htable_reset(&obj_counts, 0);
#endif
        // tune collect interval based on current live ratio
#if defined(MEMPROFILE)
        jl_printf(JL_STDERR, "allocd %ld, freed %ld, interval %ld, ratio %.2f\n", actual_allocd, freed_bytes, collect_interval, (double)freed_bytes/(double)actual_allocd);
#endif
        /* if less than ~70% of newly-allocated memory died, grow the
         * interval (by ~2.5x, capped) so we collect less often; otherwise
         * reset it to the default */
        if (freed_bytes < (7*(actual_allocd/10))) {
            if (collect_interval <= 2*(max_collect_interval/5))
                collect_interval = 5*(collect_interval/2);
        }
        else {
            collect_interval = default_collect_interval;
        }
        freed_bytes = 0;
        // if a lot of objects were finalized, re-run GC to finish freeing
        // their storage if possible.
        if (nfinal > 100000)
            jl_gc_collect();
    }
}
/* Finish a collection pass: recover from any mark-stack overflow, sweep
 * unmarked blocks, reset the free-block search hint, and release the GC
 * lock held since collection began. */
void gc_collect_end(void)
{
    gc_deal_with_stack_overflow();
    gc_sweep();
    /* restart the next free-block search from the start of the heap */
    MP_STATE_MEM(gc_last_free_atb_index) = 0;
    MP_STATE_MEM(gc_lock_depth)--;
    GC_EXIT();
}
/* Run one full mark/sweep cycle, logging start and finish to stderr. */
void gc(void)
{
    const char *begin_msg = "running gc...";
    const char *done_msg  = "finished gc";

    fprintf(stderr, "%s\n", begin_msg);
    gc_mark();
    gc_sweep();
    fprintf(stderr, "%s\n", done_msg);
}
/* Perform a collection under the GC lock and record when it last ran. */
void gc_collect()
{
    gc_wait();                       /* acquire exclusive access to the heap */
    gc_mark();
    gc_sweep();
    last_collect = _systime_millis;  /* timestamp for time-based triggering */
    gc_checktime = 0;                /* clear the pending "time to collect" flag */
    gc_signal();                     /* release the heap */
}
/* Collect garbage while keeping `data` (and everything reachable from it)
 * alive, then allow the heap to grow to twice the pre-collection count. */
void gc_collect_pipe(Object* data) {
    long before = vm->heap_num;

    gc_mark_object(data);   /* root the in-flight pipeline value */
    gc_sweep(NULL);

    vm->heap_max = before * 2;
    debug("Collected %ld objects, %ld remaining.\n",
          before - vm->heap_num, vm->heap_num);
}
void gc_collect() { int prev_num = heap_num; //printf("gc %p ptr %p\n", &prev_num+1, ptr);// スタックトップ gc_mark(get_stack_top(), NULL); gc_sweep(); heap_max = prev_num * 2; debug("Collected %d objects, %d remaining.\n", prev_num - heap_num, heap_num); }
/* Collect while keeping `data` alive, double the heap growth limit, and
 * hand `data` back to the caller. */
Object* gc_collect_end_vm(Object* data, VM* _vm) {
    long before = vm->heap_num;

    debug2("gc mark\n");
    gc_mark_object(data);   /* protect the value being returned */
    debug2("gc sweep\n");
    gc_sweep(_vm);

    vm->heap_max = before * 2;
    debug("Collected %ld objects, %ld moving.\n",
          before - vm->heap_num, vm->heap_num);
    return data;
}
/* Full mark/sweep on the VM heap; afterwards the heap may grow to twice
 * its pre-collection object count before the next collection triggers. */
void gc_collect() {
    long before = vm->heap_num;

    debug2("gc mark\n");
    gc_mark();
    debug2("gc sweep\n");
    gc_sweep(NULL);

    vm->heap_max = before * 2;
    debug("Collected %ld objects, %ld remaining.\n",
          before - vm->heap_num, vm->heap_num);
}
/* Full stop-the-world collection with interval tuning.
 * Marks, sweeps weak refs and dead objects, runs finalizers, then adjusts
 * `collect_interval` based on how much the sweep freed.  Optional timing
 * and profiling output via GCTIME / GC_FINAL_STATS / MEMPROFILE /
 * OBJPROFILE build flags. */
void jl_gc_collect(void)
{
    allocd_bytes = 0;
    if (is_gc_enabled) {
        freed_bytes = 0;
        /* defer signal delivery during the collection */
        JL_SIGATOMIC_BEGIN();
#if defined(GCTIME) || defined(GC_FINAL_STATS)
        double t0 = clock_now();
#endif
        gc_mark();
#ifdef GCTIME
        JL_PRINTF(JL_STDERR, "mark time %.3f ms\n", (clock_now()-t0)*1000);
#endif
#if defined(MEMPROFILE)
        all_pool_stats();
        big_obj_stats();
#endif
#ifdef GCTIME
        t0 = clock_now();
#endif
        /* weak refs must be cleared before their targets are freed */
        sweep_weak_refs();
        gc_sweep();
#ifdef GCTIME
        JL_PRINTF(JL_STDERR, "sweep time %.3f ms\n", (clock_now()-t0)*1000);
#endif
        run_finalizers();
        JL_SIGATOMIC_END();
#if defined(GC_FINAL_STATS)
        total_gc_time += (clock_now()-t0);
        total_freed_bytes += freed_bytes;
#endif
#ifdef OBJPROFILE
        print_obj_profile();
        htable_reset(&obj_counts, 0);
#endif
        // tune collect interval based on current live ratio
        /* little was freed -> most allocations survived; widen the interval
         * (2.5x, capped) so we collect less often.  Otherwise reset it. */
        if (freed_bytes < ((2*collect_interval)/5)) {
            if (collect_interval <= (2*max_collect_interval)/5)
                collect_interval = (5*collect_interval)/2;
        }
        else {
            collect_interval = default_collect_interval;
        }
    }
}
/* One full mark/sweep cycle inside a GC subcontext.  If the sweep fails to
 * recover enough free cells, grow the heap instead of thrashing on
 * back-to-back collections. */
static void gc_mark_and_sweep(void)
{
    size_t n_collected;

    SCM_BEGIN_GC_SUBCONTEXT();

    CDBG((SCM_DBG_GC, "[ gc start ]"));
    gc_mark();
    n_collected = gc_sweep();

    /* too few cells reclaimed: add a new heap segment */
    if (n_collected < l_heap_alloc_threshold) {
        CDBG((SCM_DBG_GC, "enough number of free cells cannot be collected. allocating new heap."));
        add_heap();
    }

    SCM_END_GC_SUBCONTEXT();
}
/* Though immediate values and symbols are GC safe even if not being
 * explicitly protected, the condition may vary according to build
 * configuration or future specification changes. So libsscm users should
 * explicitly protect such objects. -- YamaKen 2007-01-26 */
/* Return scm_true iff `obj` is protected from collection: a constant or
 * immediate, registered as a GC root, referenced from a protected static
 * variable, or reachable from live heap objects.
 * NOTE: the reachability check actually runs a mark and a sweep, so this
 * call has collection side effects — debugging/verification use only. */
SCM_EXPORT scm_bool
scm_gc_protectedp(ScmObj obj)
{
    ScmObj **slot;

    /* constants or objects referred from registers or stack */
    if (
#if SCM_USE_STORAGE_COMPACT
        SCM_IMMP(obj)
#else
        SCM_CONSTANTP(obj)
#endif
        || GCROOTS_is_protected(l_gcroots_ctx, (void *)obj))
        return scm_true;

    /* referred from static variables */
    if (l_protected_vars) {
        for (slot = l_protected_vars;
             slot < &l_protected_vars[l_protected_vars_size];
             slot++)
        {
            if (*slot && **slot == obj)
                return scm_true;
        }
    }

    /* referred from on-heap objects */
    if (scm_gc_protected_contextp()) {
        /* mark registers, stack and global vars */
        gc_mark();
    } else {
        /* doesn't mark registers and stack */
        gc_mark_global_vars();
    }
    gc_sweep();
    /* obj survives iff it was not returned to the free list by the sweep */
#if SCM_USE_STORAGE_COMPACT
    return !SCM_CELL_FREECELLP(SCM_UNTAG_PTR(obj));
#else
    return !SCM_FREECELLP(obj);
#endif
}
/*
 * GC collection.
 * Mark everything reachable from the registered root list plus the current
 * C stack (added as a temporary root), then sweep.  No-op when collection
 * is disabled.
 */
extern void GC_collect(void)
{
    // Is collection enabled?
    if (!gc_enabled)
        return;

    // Initialize marking
    gc_debug("collect [stage=init_marks]");
    gc_mark_init();

    gc_debug("collect [stage=mark]");
    /* Build a stack-allocated root describing the live C stack
     * [stacktop, stackbottom) and chain it in front of the registered
     * roots so the marker scans it conservatively. */
    struct gc_root_s root_0;
    gc_root_t root = &root_0;
    root->ptr = (void *)gc_stacktop();
    root->size = gc_stackbottom - root->ptr;
    root->ptrptr = &root->ptr;
    root->sizeptr = &root->size;
    root->elemsize = 1;          /* scan byte-by-byte */
    root->next = gc_roots;
    gc_root_t roots = root;
    gc_mark(roots);
    gc_sweep();
}
/* Trigger a single mark-sweep collection, tracing when DEBUG is set. */
void gc_ms() {
    if (DEBUG) {
        printf("begin_mark_sweep\n");
    }
    gc_mark();
    gc_sweep();
}
/* Finish a collection pass: recover from any mark-stack overflow, sweep
 * unmarked blocks, and release the GC lock held since collection began. */
void gc_collect_end(void)
{
    gc_deal_with_stack_overflow();
    gc_sweep();
    gc_unlock();
}
/* Finish a collection pass: recover from any mark-stack overflow, sweep
 * unmarked blocks, reset the free-block search hint, and release the GC
 * lock held since collection began. */
void gc_collect_end(void)
{
    gc_deal_with_stack_overflow();
    gc_sweep();
    /* restart the next free-block search from the start of the heap */
    MP_STATE_MEM(gc_last_free_atb_index) = 0;
    gc_unlock();
}
/* Run a garbage collection (mark, then sweep). */
void startGC(void)
{
    gc_mark();  /* mark: flag every reachable object */
    gc_sweep(); /* sweep: reclaim everything left unmarked */
}
// REMEMBER TO ZERO EVERYTHING!! (done below via memset before handing out)
/* Allocate a GC-managed block for an object of `type` with `flags`.
 * `size` is the requested payload size (rounded up to GC_ALIGN_SIZE);
 * `onheap` selects where the new object is linked: nonzero -> one of the
 * per-heap doubly-linked lists in _gcheaps[], zero -> the current thread's
 * staging list (marked GC_STAGED).
 * May trigger a mark/sweep first when a collection is due or free memory
 * is low.  Uses a first-fit scan of the free list starting at `hedge`,
 * splitting blocks when the remainder is large enough.
 * Returns the zeroed object, or NULL when no block fits.
 * Runs under the GC lock (gc_wait / gc_signal). */
PObject *gc_alloc(uint8_t type, uint8_t flags, uint16_t size, uint8_t onheap)
{
    uint16_t tsize;
    uint16_t bfree;
    PObject *obj = NULL, *prev, *cur, *next;
    gc_wait();
    PThread *pth = PTHREAD_CURRENT();
#if VM_DEBUG
    int32_t thid = (pth) ? ((int32_t)(vosThGetId(pth->th))) : -1;
#endif
    /* round the request up to the GC alignment granule */
    tsize = GC_ALIGN_SIZE(size); //size +(GC_ALIGNMENT- size % GC_ALIGNMENT);
    debug( "> th (%i): >>>>ALLOC %i of %i [f:%i,t:%i,h:%i]\r\n", thid, type, size, hfreemem, tsize, heapblocks);
    if (hfreemem < tsize) {
        /* no space, fail */
        goto exit_alloc;
    }
    else {
        /* normal mark & sweep — collect first if due or memory is low */
        if (gc_checktime || hfreemem < GC_TRIGGER_MIN) {
            info( "> th (%i) %x: >>> COLLECTING\r\n", thid, pth);
            gc_mark();
            gc_sweep();
            last_collect = _systime_millis;
            gc_checktime = 0;
        }
        /* first-fit walk of the circular free list, starting after hedge */
        next = cur = GCH_NEXT(hedge);
        prev = PNT(hedge);
        do {
            bfree = GCH_SIZE(cur);
            debug( "* check block %x/%x/%x of size %i/%i\n", cur, prev, hedge, bfree, tsize);
            if (bfree >= tsize) {
                /*found free block: split or give whole */
                obj = cur;
                if (bfree - tsize < 16) {
                    debug( "* found full block %x/%x/%x next is %x\n", cur, prev, hedge, GCH_NEXT(cur));
                    /* don't split: remainder too small to be a useful block */
                    tsize = bfree;
                    cur = GCH_NEXT(cur);
                    hfblocks--;
                }
                else {
                    /* split: carve tsize off the front, remainder stays free */
                    debug( "* found splittable block %x/%x/%x new is %x\n", cur, prev, hedge, PNT(TNP(cur) + tsize));
                    cur = PNT(TNP(cur) + tsize); //(PObject *)(((uint8_t *)cur) + tsize);
                    if (obj == PNT(hedge)) {
                        /* we split the edge block: the remainder becomes the new hedge */
                        debug( "* block is hedge, new hedge is %x\n", cur);
                        hedge = (uint8_t *)cur;
                        GCH_SIZE_SET(PNT(hedge), MIN(0xffff, (uint32_t)(hend - hedge)));
                        if (prev == obj) {
                            //set to self: hedge was the only free block
                            debug( "* block is hedge, no other free blocks\n");
                            GCH_NEXT_SET(PNT(hedge), PNT(hedge));
                            //prev = PNT(hedge);
                            break;
                        }
                        else {
                            //set to next of hedge before alloc
                            debug( "* block is hedge, other free blocks, prev is %x so next is still %x\n", prev, GCH_NEXT(obj));
                            GCH_NEXT_SET(PNT(hedge), GCH_NEXT(obj));
                        }
                    }
                    else {
                        /* interior split: the remainder inherits obj's next link */
                        debug( "* block is not hedge: next is %x\n", GCH_NEXT(obj));
                        GCH_NEXT_SET(cur, GCH_NEXT(obj));
                        GCH_SIZE_SET(cur, bfree - tsize); //if cur==hedge np
                    }
                }
                //set prev to current: unlink the allocated block from the free list
                debug( "* block allocated, update prev %x setting next to %x\n", prev, cur);
                GCH_NEXT_SET(prev, cur);
                break;
            }
            else {
                prev = cur;
                cur = GCH_NEXT(cur);
            }
        } while (cur != next);
    }
    if (obj == NULL) {
        goto exit_alloc;
    }
    //zero everything — callers rely on fully zeroed storage
    memset(((uint8_t *)obj), 0, tsize);
    //set on stage
    if (onheap) {
        //allocate on the heap list: insert into the per-heap circular doubly-linked list
        uint16_t heapn = (TNP(obj) - TNP(hbase)) / GC_HEAPSIZE;
        PObject *gcheap = _gcheaps[heapn];
        if (!gcheap) {
            /* first object on this heap: list of one, linked to itself */
            _gcheaps[heapn] = obj;
            GCH_HEAP_NEXT_SET(obj, obj);
            GCH_HEAP_PREV_SET(obj, obj);
            debug( "alloc %x first on heap %i @ %x\n", obj, heapn, obj);
        }
        else {
            //gcheap = _gcheaps[heapn];
            /* splice obj in right after the list head */
            next = GCH_HEAP_NEXT(gcheap);
            GCH_HEAP_PREV_SET(next, obj);
            GCH_HEAP_NEXT_SET(gcheap, obj);
            GCH_HEAP_PREV_SET(obj, gcheap);
            GCH_HEAP_NEXT_SET(obj, next);
            debug( "alloc %x,on heap %i @ %x with next %x prev %x\n", obj, heapn, gcheap, next, gcheap);
        }
        heapblocks[heapn]++;
    }
    else {
        //allocate to system or thread: push onto the thread's staging list
        if (pth) {
            GCH_NEXT_SET(obj, pth->stage);
            pth->stage = obj;
        }
        GCH_FLAG_SET(obj, GC_STAGED);
    }
    //set gc data
    GCH_SIZE_SET(obj, tsize);
    obj->header.type = type;
    obj->header.flags = flags;
    //statistics
    hfreemem -= tsize;
    hblocks++;
    debug( "alloced %x -> %x <- %x\n", obj, GCH_HEAP_NEXT(obj), GCH_HEAP_PREV(obj));
    debug( "> th (%i)%x: end alloc %x->%x %x %x edge %i\n", thid, pth, obj, GCH_NEXT(obj), GCH_FLAG(obj), obj->header.gch.flags, PNTD(hedge));
exit_alloc:
    //TODO: obj == NULL --> raise system exception: out of memory!
    //debug(">>>>>ALLOCED %i\r\n", obj);
    //debug("o %i\n",obj ? 1:0);
    gc_signal();
    //gc_trace();
    return obj;
}
/* Full mark/sweep pass for the interpreter heap.
 * Roots: the class table, active scopes, the literal list, registered
 * global variables, the C stack (scanned conservatively in whichever
 * direction it grows), and CPU registers spilled into a jmp_buf via
 * setjmp.  When GC_STOP is set, grows the heap instead of collecting. */
void gc_work()
{
    extern struct st_table *class_tbl;
    jmp_buf save_regs_gc_mark;
    SET_STACK_END;

    malloc_memory_increase = 0;
    malloc_object_increase = 0;

    if( GC_STOP ) {
        /* collection disabled: expand the heap instead */
        rabbit_heap_stretch();
        return;
    }

    ++GC_work_count;

    // mark all classes
    var_tbl_mark( class_tbl );

    // mark objects reachable from the scopes currently in use
    active_scope_foreach( scope_tbl_mark );

    {
        // mark literals
        struct object_list *next;
        next = Literal_List;
        while( next != NULL ) {
            if( !IS_PTR_VALUE(next->value) ) {
                /* a non-pointer value in the literal list is a bug */
                rabbit_bug( "(GC)ポインタ型では無いオブジェクトがリテラルリストにいました(%d)", VALUE_TYPE(next->value) );
            }
            gc_mark(next->value);
            next = next->next;
        }
    }
    {
        // mark global variables
        struct gc_list *next;
        for( next = GC_Global_List; next != NULL; next = next->next ) {
            gc_mark(*next->value);
        }
    }

    // mark the C stack (scan order depends on which way the stack grows)
    if( (VALUE*)STACK_END < rabbit_gc_stack_start ) {
        gc_stack_mark( (VALUE*)STACK_END, rabbit_gc_stack_start );
    }
    else {
        gc_stack_mark( rabbit_gc_stack_start, (VALUE*)STACK_END+1 );
    }

    {
        // mark values held in machine registers, spilled to memory via setjmp
        FLUSH_REGISTER_WINDOWS;
        setjmp(save_regs_gc_mark);
        VALUE *ptr = (VALUE*)save_regs_gc_mark;
        long n = sizeof(save_regs_gc_mark) / sizeof(VALUE *);
        while (n--) {
            /* conservative: only mark words that look like heap pointers */
            if( is_pointer_to_heap_value((void*)*ptr) ) {
                gc_mark(*ptr);
            }
            ptr++;
        }
    }

    gc_sweep();
}
/* Collect the young generation: mark its live objects, then sweep it. */
void gc_collect()
{
    gc_mark(young);
    gc_sweep(young);
}