/* FIXME:: the collection should be separated from the allocation */
void *wspace_alloc(unsigned size, Allocator *allocator)
{
  void *p_obj = NULL;

  /*
  if( get_hyper_free_chunk_list()->head == NULL )
    INFO2("gc.wspace", "[BEFORE ALLOC]hyper free chunk is EMPTY!!");
  */

  if(gc_is_specify_con_gc())
    gc_sched_collection(allocator->gc, GC_CAUSE_CONCURRENT_GC);

  /* First, try to allocate object from TLB (thread local chunk) */
  p_obj = wspace_try_alloc(size, allocator);
  if(p_obj){
    ((Mutator*)allocator)->new_obj_size += size;
    /*
    if( get_hyper_free_chunk_list()->head == NULL )
      INFO2("gc.wspace", "[AFTER FIRST ALLOC]hyper free chunk is EMPTY!!");
    */
    return p_obj;
  }

  if(allocator->gc->in_collection) return NULL;

  vm_gc_lock_enum();
  /* after holding the lock, check whether another thread has collected already */
  p_obj = wspace_try_alloc(size, allocator);
  if(p_obj){
    vm_gc_unlock_enum();
    ((Mutator*)allocator)->new_obj_size += size;
    /*
    if( get_hyper_free_chunk_list()->head == NULL )
      INFO2("gc.wspace", "[AFTER SECOND ALLOC]hyper free chunk is EMPTY!!");
    */
    return p_obj;
  }

  INFO2("gc.con.info", "[Exhausted Cause] Allocation size is :" << size << " bytes");
  GC *gc = allocator->gc;
  /*
  gc->cause = GC_CAUSE_MOS_IS_FULL;
  if(gc_is_specify_con_gc())
    gc_relaim_heap_con_mode(gc);
  else
  */
  gc_reclaim_heap(gc, GC_CAUSE_MOS_IS_FULL);
  vm_gc_unlock_enum();

#ifdef SSPACE_CHUNK_INFO
  printf("Failure size: %x\n", size);
#endif

  p_obj = wspace_try_alloc(size, allocator);
  /*
  if( get_hyper_free_chunk_list()->head == NULL )
    INFO2("gc.wspace", "[AFTER COLLECTION ALLOC]hyper free chunk is EMPTY!!");
  */
  if(p_obj) ((Mutator*)allocator)->new_obj_size += size;

  return p_obj;
}
void put_all_fin_on_exit(GC *gc)
{
  Pool *obj_with_fin_pool = gc->finref_metadata->obj_with_fin_pool;
  Pool *free_pool = gc->finref_metadata->free_pool;

  /* Because we are manipulating obj_with_fin_pool, the GC lock must be held in case a GC happens */
  vm_gc_lock_enum();
  /* FIXME: holding the gc lock is not enough; there may be mutators that are still allocating objects with finalizers.
   * This could be fixed as follows:
   * in fspace_alloc() and lspace_alloc(), hold the gc lock throughout
   * allocating memory and adding the objects with finalizers to the pool
   */
  lock(gc->mutator_list_lock);
  gc_set_obj_with_fin(gc);
  unlock(gc->mutator_list_lock);

  Vector_Block *block = pool_get_entry(obj_with_fin_pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    while(!vector_block_iterator_end(block, iter)){
      Managed_Object_Handle p_obj = (Managed_Object_Handle)read_slot((REF*)iter);
      if(p_obj)
        vm_finalize_object(p_obj);
      iter = vector_block_iterator_advance(block, iter);
    }
    vector_block_clear(block);
    pool_put_entry(free_pool, block);
    block = pool_get_entry(obj_with_fin_pool);
  }

  vm_gc_unlock_enum();
}
void* lspace_alloc(unsigned size, Allocator *allocator)
{
  unsigned int try_count = 0;
  void* p_result = NULL;
  POINTER_SIZE_INT alloc_size = ALIGN_UP_TO_KILO(size);
  Lspace* lspace = (Lspace*)gc_get_los((GC_Gen*)allocator->gc);
  Free_Area_Pool* pool = lspace->free_pool;

  while( try_count < 2 ){
    if((p_result = lspace_try_alloc(lspace, alloc_size)))
      return p_result;

    /* Failed: no adequate area was found in any list, so GC first, then try again. */
    if(try_count == 0){
      vm_gc_lock_enum();
      /* Check again whether there is space for the obj: another mutator thread may have
         issued a GC while we were waiting for the gc lock. */
      if((p_result = lspace_try_alloc(lspace, alloc_size))){
        vm_gc_unlock_enum();
        return p_result;
      }
      lspace->failure_size = round_up_to_size(alloc_size, KB);

      gc_reclaim_heap(allocator->gc, GC_CAUSE_LOS_IS_FULL);

      if(lspace->success_ptr){
        p_result = lspace->success_ptr;
        lspace->success_ptr = NULL;
        vm_gc_unlock_enum();
        return p_result;
      }
      vm_gc_unlock_enum();
      try_count ++;
    }else{
      try_count ++;
    }
  }

  return NULL;
}
/* Force a collection; the kind of collection is chosen from the gc_algorithm setting,
   then finalization is hinted to the VM. */
void select_force_gc()
{
  vm_gc_lock_enum();

  if (gc_algorithm < 10) {
    force_gc();
  } else if ((gc_algorithm / 10) == 2) {
    full_gc(0);
  } else if ((gc_algorithm / 10) == 3) {
    heap.old_objects.prev_pos = heap.old_objects.pos;
    copy_gc(0);
  }

  vm_gc_unlock_enum();

  hythread_suspend_disable();
  vm_hint_finalize();
  hythread_suspend_enable();
}