void insert_object_location(GC_Thread *gc_thread, void *dest, Partial_Reveal_Object *p_obj)
{
    assert((p_obj->get_obj_info() & FORWARDING_BIT_MASK) == FORWARDING_BIT_MASK); // Make sure we have the forwarding bit.

    Obj_Info_Type orig_obj_info = p_obj->get_obj_info() & ~FORWARDING_BIT_MASK; // The original obj_info without the forwarding bit set.
    if (orig_obj_info) {
        // obj_info is not zero, so remember it.
        object_lock_save_info *obj_info = (object_lock_save_info *) STD_MALLOC(sizeof(object_lock_save_info));
        if (!obj_info) {
            DIE("Internal error: out of C malloc space.");
        }
        // Save what needs to be restored.
        obj_info->obj_header = orig_obj_info;
        // Keep track of the new after-slide address.
        obj_info->p_obj = (Partial_Reveal_Object *) dest;
        gc_thread->insert_object_header_info_during_sliding_compaction(obj_info);
        gc_trace(p_obj, "Object being compacted or colocated needs obj_info preserved.");
    }

    assert((p_obj->get_obj_info() & FORWARDING_BIT_MASK) == FORWARDING_BIT_MASK); // Make sure we still have the forwarding bit.

    // Clobber the header with the new address.
    assert(dest);
    assert(((POINTER_SIZE_INT)dest | FORWARDING_BIT_MASK) != (POINTER_SIZE_INT)dest);
    // This might break on Linux if the heap is in the upper 2 gig of memory (high bit set).
    // If it does, we need to change to the low bit.
    p_obj->set_forwarding_pointer(dest);

    gc_trace(p_obj, "Insert new object location for this object");
    gc_trace(dest, "Eventual object location installed.");
}
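/*
 * A minimal, self-contained sketch of the header-tagging technique used by
 * insert_object_location(), assuming word-aligned objects so a spare header
 * bit can mark "this word is now a forwarding pointer". All names here
 * (fwd_header, FWD_TAG, fwd_install, fwd_target) are hypothetical; the real
 * collector uses Obj_Info_Type, FORWARDING_BIT_MASK and
 * set_forwarding_pointer, and (per the comment above) its mask may be the
 * high bit rather than the low bit.
 */
#include <assert.h>
#include <stdint.h>

#define FWD_TAG ((uintptr_t)1)

typedef struct { uintptr_t header; } fwd_header;

/* Overwrite the header with the destination address plus the tag bit.
 * Any information the header carried must be saved first (cf.
 * object_lock_save_info above). */
static void fwd_install(fwd_header *h, void *dest) {
    assert(((uintptr_t)dest & FWD_TAG) == 0); /* dest must be aligned */
    h->header = (uintptr_t)dest | FWD_TAG;
}

static int fwd_is_forwarded(const fwd_header *h) {
    return (h->header & FWD_TAG) != 0;
}

static void *fwd_target(const fwd_header *h) {
    assert(fwd_is_forwarded(h));
    return (void *)(h->header & ~FWD_TAG);
}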
void fix_slots_to_compaction_live_objects(GC_Thread *gc_thread)
{
    block_info *p_compaction_block = NULL;
    unsigned int num_slots_fixed = 0;

    while ((p_compaction_block = gc_thread->_p_gc->get_block_for_fix_slots_to_compaction_live_objects(gc_thread->get_id(), p_compaction_block))) {
        while (true) {
            Remembered_Set *some_slots = gc_thread->_p_gc->get_slots_into_compaction_block(p_compaction_block);
            if (some_slots == NULL) {
                // Done fixing all slots.
                break;
            }
            some_slots->rewind();
            Slot one_slot(NULL);
            while (one_slot.set(some_slots->next().get_address())) {
                Partial_Reveal_Object *p_obj = one_slot.dereference();
                // This slot points into this compaction block.
                assert(GC_BLOCK_INFO(p_obj) == p_compaction_block);
                assert(p_obj->get_obj_info() & FORWARDING_BIT_MASK);
                Partial_Reveal_Object *p_new_obj = p_obj->get_forwarding_pointer();
                gc_trace(p_obj, " fix_slots_to_compaction_live_objects(): a slot pointed to this object, but the slot is being repointed to " << p_new_obj);
                gc_trace(p_new_obj, " fix_slots_to_compaction_live_objects(): a slot repointed to this object, but the slot was pointing to " << p_obj);
                // Update the slot.
                one_slot.update(p_new_obj);
                num_slots_fixed++;
                assert(GC_BLOCK_INFO(p_obj)->in_nursery_p == true);
                assert(GC_BLOCK_INFO(p_new_obj)->in_nursery_p == true);
                if (!cross_block_compaction) {
                    // Objects move within the same block.
                    assert(GC_BLOCK_INFO(p_new_obj) == GC_BLOCK_INFO(p_obj));
                } else {
                    // The block this moves into had better have been flagged for compaction.
                    assert(GC_BLOCK_INFO(p_new_obj)->is_compaction_block);
                }
            } // while (one_slot)

            // Delete the remembered set since it is not needed anymore; all slot updates for this list are done.
            delete some_slots;
        } // while (true)
    } // while

    INFOW(gc_thread, "fixed " << num_slots_fixed << " slots");
}
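/*
 * The slot-fixing pass above reduces to one idea: every recorded slot that
 * points into a compaction block is redirected through the forwarding
 * pointer installed earlier. A stripped-down sketch, assuming slots were
 * collected as plain pointers-to-pointers (hypothetical obj type; the real
 * code iterates Remembered_Set lists and Slot wrappers per block):
 */
#include <stddef.h>

typedef struct obj { struct obj *forward; } obj;

static void fix_slots(obj **slots[], size_t n) {
    for (size_t i = 0; i < n; i++) {
        obj **slot = slots[i];
        *slot = (*slot)->forward; /* repoint the slot at the moved copy */
    }
}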
void su_gc(su_state *s)
{
    /* Finish any incremental marking left over from the previous cycle. */
    while (s->gc_gray_size > 0)
        gc_trace(s);
    begin(s);
    /* Scan until the gray set is drained, then finish the cycle. */
    while (s->gc_gray_size > 0)
        scan(s);
    end(s);
}
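/*
 * su_gc() above drives a gray-set collector: drain leftover gray objects,
 * re-root, scan to empty, finish. What one "scan" step typically does in
 * such a scheme is sketched below with hypothetical types; scan() and
 * gc_trace() internals are not shown in this file, so this is an assumption
 * about their shape, not the su implementation.
 */
typedef struct gobj {
    int color;           /* 0 = white, 1 = gray, 2 = black */
    struct gobj *ref[2]; /* outgoing references */
} gobj;

/* Pop one gray object, gray its white children, blacken it. */
static void scan_one(gobj **gray_stack, int *gray_size) {
    gobj *o = gray_stack[--*gray_size];
    for (int i = 0; i < 2; i++) {
        gobj *c = o->ref[i];
        if (c && c->color == 0) {
            c->color = 1;
            gray_stack[(*gray_size)++] = c;
        }
    }
    o->color = 2;
}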
static void vm_loop(su_state *s, function_t *func)
{
    value_t tmpv, tmpv2;
    instruction_t inst;
    int tmp, narg, i, j, k;
    const char *tmpcs;
    su_debug_data dbg;

    s->frame = FRAME();
    s->prot = func->prot;

#define ARITH_OP(op) \
    su_check_type(s, -2, SU_NUMBER); \
    su_check_type(s, -1, SU_NUMBER); \
    STK(-2)->obj.num = STK(-2)->obj.num op STK(-1)->obj.num; \
    su_pop(s, 1); \
    break;

#define LOG_OP(op) \
    su_check_type(s, -2, SU_NUMBER); \
    su_check_type(s, -1, SU_NUMBER); \
    STK(-2)->type = SU_BOOLEAN; \
    STK(-2)->obj.b = STK(-2)->obj.num op STK(-1)->obj.num; \
    su_pop(s, 1); \
    break;

    for (s->pc = 0; s->pc < s->prot->num_inst; s->pc++) {
        tmp = s->interrupt | atomic_get(&s->msi->interrupt);
        if (tmp) {
            if ((tmp & ISCOLLECT) == ISCOLLECT) {
                su_thread_indisposable(s);
                su_thread_disposable(s);
            }
            if ((tmp & IGC) == IGC) {
                unmask_thread_interrupt(s, IGC);
                gc_trace(s);
            }
            if ((tmp & IBREAK) == IBREAK) {
                unmask_thread_interrupt(s, IBREAK);
                dbg.file = s->prot->name->str;
                dbg.line = s->prot->lineinf[s->pc];
                s->debug_cb(s, &dbg, s->debug_cb_data);
            }
        }
        inst = s->prot->inst[s->pc];
        switch (inst.id) {
            case OP_PUSH:
                push_value(s, &func->constants[inst.a]);
                break;
            case OP_POP:
                su_pop(s, inst.a);
                break;
            case OP_ADD: ARITH_OP(+)
            case OP_SUB: ARITH_OP(-)
            case OP_MUL: ARITH_OP(*)
            case OP_DIV:
                su_check_type(s, -2, SU_NUMBER);
                su_check_type(s, -1, SU_NUMBER);
                su_assert(s, STK(-1)->obj.num != 0.0, "Division by zero!");
                STK(-2)->obj.num = STK(-2)->obj.num / STK(-1)->obj.num;
                su_pop(s, 1);
                break;
            case OP_MOD:
                su_check_type(s, -2, SU_NUMBER);
                su_check_type(s, -1, SU_NUMBER);
                STK(-2)->obj.num = (double)((int)STK(-2)->obj.num % (int)STK(-1)->obj.num);
                su_pop(s, 1);
                break;
            case OP_POW:
                su_check_type(s, -2, SU_NUMBER);
                su_check_type(s, -1, SU_NUMBER);
                STK(-2)->obj.num = pow(STK(-2)->obj.num, STK(-1)->obj.num);
                su_pop(s, 1);
                break;
            case OP_UNM:
                su_check_type(s, -1, SU_NUMBER);
                STK(-1)->obj.num = -STK(-1)->obj.num;
                break;
            case OP_EQ:
                STK(-2)->obj.b = value_eq(STK(-2), STK(-1));
                STK(-2)->type = SU_BOOLEAN;
                su_pop(s, 1);
                break;
            case OP_LESS: LOG_OP(<);
            case OP_LEQUAL: LOG_OP(<=);
            case OP_NOT:
                if (STK(-1)->type == SU_BOOLEAN) {
                    STK(-1)->obj.b = !STK(-1)->obj.b;
                } else {
                    STK(-1)->obj.b = (STK(-1)->type == SU_NIL) ? 1 : 0;
                    STK(-1)->type = SU_BOOLEAN;
                }
                break;
            case OP_AND:
                tmp = STK(-2)->type != SU_NIL && (STK(-2)->type != SU_BOOLEAN || STK(-2)->obj.b);
                if (tmp && STK(-1)->type != SU_NIL && (STK(-1)->type != SU_BOOLEAN || STK(-1)->obj.b)) {
                    s->stack[s->stack_top - 2] = *STK(-1);
                } else {
                    STK(-2)->obj.b = 0;
                    STK(-2)->type = SU_BOOLEAN;
                }
                su_pop(s, 1);
                break;
            case OP_OR:
                if (STK(-2)->type != SU_NIL && (STK(-2)->type != SU_BOOLEAN || STK(-2)->obj.b)) {
                    /* First operand is truthy: keep the value at -2. */
                } else if (STK(-1)->type != SU_NIL && (STK(-1)->type != SU_BOOLEAN || STK(-1)->obj.b)) {
                    s->stack[s->stack_top - 2] = *STK(-1);
                } else {
                    STK(-2)->obj.b = 0;
                    STK(-2)->type = SU_BOOLEAN;
                }
                su_pop(s, 1);
                break;
            case OP_TEST:
                if (STK(-1)->type != SU_NIL && (STK(-1)->type != SU_BOOLEAN || STK(-1)->obj.b))
                    s->pc = inst.b - 1;
                su_pop(s, 1);
                break;
            case OP_FOR:
                if (STK(-2)->type == SU_NIL) {
                    su_swap(s, -2, -1);
                    s->stack_top--;
                    s->pc = inst.b - 1;
                } else {
                    s->stack_top--;
                    su_check_type(s, -1, SU_SEQ);
                    su_rest(s, -1);
                    su_swap(s, -2, -1);
                    su_first(s, -1);
                    su_swap(s, -2, -1);
                    s->stack_top--;
                }
                break;
            case OP_JMP:
                s->pc = inst.b - 1;
                break;
            case OP_RETURN:
                s->pc = s->frame->ret_addr - 1;
                s->prot = s->frame->func->prot;
                func = s->frame->func;
                s->stack[s->frame->stack_top] = *STK(-1);
                s->stack_top = s->frame->stack_top + 1;
                s->frame_top--;
                s->frame = FRAME();
                break;
            case OP_TCALL:
                s->pc = s->frame->ret_addr - 1;
                s->prot = s->frame->func->prot;
                func = s->frame->func;
                memmove(&s->stack[s->frame->stack_top], &s->stack[s->stack_top - (inst.a + 1)], sizeof(value_t) * (inst.a + 1));
                s->stack_top = s->frame->stack_top + inst.a + 1;
                s->frame_top--;
                s->frame = FRAME();
                /* Fall through and do a normal call. */
            case OP_CALL:
                tmp = s->stack_top - inst.a - 1;
                switch (s->stack[tmp].type) {
                    case SU_FUNCTION:
                        s->frame = &s->frames[s->frame_top++];
                        assert(s->frame_top <= MAX_CALLS);
                        s->frame->ret_addr = s->pc + 1;
                        s->frame->func = func;
                        s->frame->stack_top = tmp;
                        func = s->stack[tmp].obj.func;
                        if (func->narg < 0)
                            su_vector(s, inst.a);
                        else if (func->narg != inst.a)
                            su_error(s, "Bad number of arguments to function! Expected %i, but got %i.", (int)func->narg, (int)inst.a);
                        s->prot = func->prot;
                        s->pc = -1;
                        break;
                    case SU_NATIVEFUNC:
                        narg = s->narg;
                        s->narg = inst.a;
                        if (s->stack[tmp].obj.nfunc(s, inst.a)) {
                            s->stack[tmp] = *STK(-1);
                        } else {
                            s->stack[tmp].type = SU_NIL;
                        }
                        s->stack_top = tmp + 1;
                        s->narg = narg;
                        break;
                    case SU_VECTOR:
                        if (inst.a == 1) {
                            su_check_type(s, -1, SU_NUMBER);
                            tmpv = vector_index(s, s->stack[tmp].obj.vec, su_tointeger(s, -1));
                            su_pop(s, 2);
                            push_value(s, &tmpv);
                        } else {
                            for (i = -inst.a, j = 0; i; i++, j++) {
                                su_check_type(s, i - j, SU_NUMBER);
                                tmpv = vector_index(s, s->stack[tmp].obj.vec, su_tointeger(s, i - j));
                                push_value(s, &tmpv);
                            }
                            su_vector(s, inst.a);
                            s->stack[tmp] = s->stack[s->stack_top - 1];
                            s->stack_top -= inst.a + 1;
                        }
                        break;
                    case SU_MAP:
                        if (inst.a == 1) {
                            tmpv2 = *STK(-1);
                            tmpv = map_get(s, s->stack[tmp].obj.m, &tmpv2, hash_value(&tmpv2));
                            su_assert(s, tmpv.type != SU_INV, "No value with key: %s", stringify(s, &tmpv2));
                            su_pop(s, 2);
                            push_value(s, &tmpv);
                        } else {
                            for (i = -inst.a, j = 0; i; i++, j += 2) {
                                tmpv2 = *STK(i - j);
                                push_value(s, &tmpv2);
                                tmpv = map_get(s, s->stack[tmp].obj.m, &tmpv2, hash_value(&tmpv2));
                                su_assert(s, tmpv.type != SU_INV, "No value with key: %s", stringify(s, &tmpv2));
                                push_value(s, &tmpv);
                            }
                            su_map(s, inst.a);
                            s->stack[tmp] = s->stack[s->stack_top - 1];
                            s->stack_top -= inst.a + 1;
                        }
                        break;
                    case SU_STRING:
                        if (inst.a == 1) {
                            su_check_type(s, -1, SU_NUMBER);
                            j = su_tointeger(s, -1);
                            su_assert(s, j < s->stack[tmp].obj.str->size, "Out of range!");
                            s->scratch_pad[0] = s->stack[tmp].obj.str->str[j];
                            su_pop(s, 2);
                            su_pushbytes(s, s->scratch_pad, 1);
                        } else {
                            k = 0;
                            for (i = -inst.a; i; i++) {
                                su_check_type(s, i, SU_NUMBER);
                                j = su_tointeger(s, i);
                                su_assert(s, j < s->stack[tmp].obj.str->size, "Out of range!");
                                s->scratch_pad[k++] = s->stack[tmp].obj.str->str[j];
                                assert(k < SU_SCRATCHPAD_SIZE);
                            }
                            su_pushbytes(s, s->scratch_pad, k);
                            s->stack[tmp] = s->stack[s->stack_top - 1];
                            s->stack_top -= inst.a + 1;
                        }
                        break;
                    case SU_NATIVEDATA:
                        tmpv = s->stack[tmp];
                        if (tmpv.obj.data->vt && tmpv.obj.data->vt->call) {
                            narg = s->narg;
                            s->narg = inst.a;
                            if (tmpv.obj.data->vt->call(s, (void*)tmpv.obj.data->data, inst.a))
                                s->stack[tmp] = *STK(-1);
                            else
                                s->stack[tmp].type = SU_NIL;
                            s->stack_top = tmp + 1;
                            s->narg = narg;
                            break;
                        }
                        /* No call slot: fall through to the default handling. */
                    default:
                        if (inst.a == 1 && isseq(s, &s->stack[tmp])) {
                            su_check_type(s, -1, SU_STRING);
                            tmpcs = su_tostring(s, -1, NULL);
                            if (!strcmp(tmpcs, "first")) {
                                s->stack[(--s->stack_top) - 1] = seq_first(s, STK(-1)->obj.q);
                                break;
                            } else if (!strcmp(tmpcs, "rest")) {
                                s->stack[(--s->stack_top) - 1] = seq_rest(s, STK(-1)->obj.q);
                                break;
                            }
                        }
                        su_error(s, "Can't apply '%s'.", type_name(s->stack[tmp].type));
                }
                break;
            case OP_LAMBDA:
                assert(inst.a < s->prot->num_prot);
                lambda(s, &s->prot->prot[inst.a], inst.b);
                break;
            case OP_GETGLOBAL:
                tmpv = func->constants[inst.a];
                su_assert(s, tmpv.type == SU_STRING, "Global key must be a string!");
                tmpv = map_get(s, unref_local(s, s->stack[SU_GLOBAL_INDEX].obj.loc).obj.m, &tmpv, hash_value(&tmpv));
                if (tmpv.type == SU_INV)
                    global_error(s, "Undefined global variable", &func->constants[inst.a]);
                push_value(s, &tmpv);
                break;
            case OP_SETGLOBAL:
                tmpv = func->constants[inst.a];
                su_assert(s, tmpv.type == SU_STRING, "Global key must be a string!");
                i = hash_value(&tmpv);
                tmpv2 = unref_local(s, s->stack[SU_GLOBAL_INDEX].obj.loc);
                tmpv = map_insert(s, tmpv2.obj.m, &tmpv, i, STK(-1));
                set_local(s, s->stack[SU_GLOBAL_INDEX].obj.loc, &tmpv);
                break;
            case OP_SHIFT:
                s->stack[s->stack_top - (inst.a + 1)] = *STK(-1);
                s->stack_top -= inst.a;
                break;
            case OP_LOAD:
                assert(FRAME()->stack_top + inst.a < s->stack_top);
                push_value(s, &s->stack[FRAME()->stack_top + inst.a]);
                break;
            case OP_LUP:
                assert(inst.a < func->num_ups);
                push_value(s, &func->upvalues[inst.a]);
                break;
            case OP_LCL:
                assert(inst.b < s->msi->num_c_lambdas);
                push_value(s, &s->msi->c_lambdas[inst.b]);
                break;
            default:
                assert(0);
        }
#undef ARITH_OP
#undef LOG_OP
    }
}
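/*
 * vm_loop() is a classic switch-dispatch stack VM: fetch s->prot->inst[pc],
 * dispatch on inst.id, operate on the value stack. A stripped-down,
 * self-contained sketch of that shape (hypothetical MOP_* opcodes and
 * mini_inst type; not part of the su_* API):
 */
#include <stdio.h>

enum { MOP_PUSH, MOP_ADD, MOP_PRINT, MOP_HALT };
typedef struct { int id; double a; } mini_inst;

static void mini_loop(const mini_inst *code) {
    double stack[64];
    int top = 0;
    for (int pc = 0; ; pc++) {
        mini_inst in = code[pc];
        switch (in.id) {
            case MOP_PUSH: stack[top++] = in.a; break;          /* push constant */
            case MOP_ADD: stack[top - 2] += stack[top - 1]; top--; break;
            case MOP_PRINT: printf("%g\n", stack[top - 1]); break;
            case MOP_HALT: return;
        }
    }
}

/* Usage: {MOP_PUSH,1}, {MOP_PUSH,2}, {MOP_ADD,0}, {MOP_PRINT,0}, {MOP_HALT,0}
 * prints 3. */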
//
// Input/Output next_obj_start_arg - a pointer to a pointer to where the
// fused objects will eventually reside. Updated to the end of the fused
// objects.
//
bool fuse_objects(GC_Thread *gc_thread, Partial_Reveal_Object *p_obj, void **next_obj_start_arg, unsigned int *problem_locks)
{
    bool UNUSED debug_printf_trigger = false;

    unsigned int moved_count = 0;
    unsigned int unmoved_count = 0;

    // If we can fuse this object we do so and return true.
    assert(p_obj->vt()->get_gcvt()->gc_fuse_info);
    gc_trace(p_obj, "This object is a candidate for fusing with next object.");

    Partial_Reveal_Object *scan_stack[MAX_FUSABLE_OBJECT_SCAN_STACK];
    unsigned top = 0;
    Partial_Reveal_Object *fuse_queue[MAX_FUSED_OBJECT_COUNT];
    unsigned last = 0;

    scan_stack[top++] = p_obj;
    unsigned int fused_size = get_object_size_bytes(p_obj);
    unsigned int base_obj_size = fused_size;

    void *to_obj = *next_obj_start_arg;
    void * UNUSED debug_orig_to_obj = to_obj;

    // Claim the forwarding bit if you can. If you lose the race you can't fuse, since someone else is moving the object.
    Obj_Info_Type old_base_value = p_obj->get_obj_info();
    Obj_Info_Type new_base_value = old_base_value;
    if ((old_base_value & FORWARDING_BIT_MASK) == FORWARDING_BIT_MASK) {
        return false; // Some other thread is going to move this object.
    }
    new_base_value = old_base_value | FORWARDING_BIT_MASK;
    if (p_obj->compare_exchange(new_base_value, old_base_value) != old_base_value) {
        // We did not get the forwarding pointer successfully; some other thread got it.
        // Since this is the base object we can just return false.
        return false;
    }

    // Build a queue of objects to colocate, but do not grab the FORWARDING_BIT until the queue is built.
    while (top > 0) {
        Partial_Reveal_Object *p_cur_obj = scan_stack[--top];
        int *offset_scanner = init_fused_object_scanner(p_cur_obj);
        Slot pp_next_object(NULL);
        Partial_Reveal_Object *p_last_object = p_obj;
        while (pp_next_object.set(p_get_ref(offset_scanner, p_cur_obj)) != NULL) {
            // Move the scanner to the next reference.
            offset_scanner = p_next_ref(offset_scanner);
            // This object is to be fused with the object located at the gc_fuse_info, so calculate the required size.
            Partial_Reveal_Object *p_next_from_obj = pp_next_object.dereference();
            gc_trace(p_next_from_obj, "This object is a candidate to be fused with previous object.");
            if (p_next_from_obj) { // Check NULL.
                block_info *fuse_block_info = GC_BLOCK_INFO(p_next_from_obj);
                void *next_natural_obj = (void *)(POINTER_SIZE_INT(p_last_object) + get_object_size_bytes(p_last_object));
                Obj_Info_Type new_value = p_next_from_obj->get_obj_info();
                bool is_colocation_natural = (next_natural_obj == (void *)p_next_from_obj);
                bool overflow = (((POINTER_SIZE_INT)to_obj + fused_size + get_object_size_bytes(p_next_from_obj)) > (POINTER_SIZE_INT)(GC_BLOCK_CEILING(to_obj)));
                bool already_forwarded = ((new_value & FORWARDING_BIT_MASK) == FORWARDING_BIT_MASK);
                bool in_compaction_block = gc_thread->_p_gc->is_compaction_block(fuse_block_info);
                bool can_fuse = ((!already_forwarded) && (!is_colocation_natural) && (!overflow) && in_compaction_block);
                if (can_fuse) {
                    if (p_next_from_obj->vt()->get_gcvt()->gc_fuse_info) {
                        scan_stack[top++] = p_next_from_obj;
                    }
                    fuse_queue[last] = p_next_from_obj;
                    fused_size += get_object_size_bytes(p_next_from_obj);
                    last++;
                } else {
                    p_obj->set_obj_info(old_base_value); // Release the forwarding bit and don't colocate this object.
                    return false;
                }
            }
        }
    }

    unsigned i;
    // Grab the forwarding bits for the other objects in the queue. If you can't get a bit,
    // remove the object from the queue.
    for (i = 0; i < last; i++) {
        Partial_Reveal_Object *p_fuse_obj = fuse_queue[i];
        Obj_Info_Type new_value = p_fuse_obj->get_obj_info();
        Obj_Info_Type old_value = new_value;
        bool already_forwarded = ((new_value & FORWARDING_BIT_MASK) == FORWARDING_BIT_MASK);
        new_value = old_value | FORWARDING_BIT_MASK; // Create the value with the forwarding bit set.
        if (!already_forwarded) {
            // Install the forwarding bit if it has not already been forwarded.
            already_forwarded = (p_fuse_obj->compare_exchange(new_value, old_value) != old_value);
        }

        if (already_forwarded) {
            debug_printf_trigger = true;
            TRACE("REMOVING FROM FUSE QUEUE.");
            // Remove this object from the queue since we can't colocate it.
            unsigned int j;
            for (j = i; j < last - 1; j++) {
                fuse_queue[j] = fuse_queue[j + 1];
            }
            // We have one less object on the queue; clear the now-stale tail slot.
            fuse_queue[last - 1] = NULL;
            last--;
            i--; // Redo this index, since fuse_queue[i] now holds a new object.
            unmoved_count++;
        }
        gc_trace(p_fuse_obj, "No space, so this object is not fused with parent.");
    }

    // We don't fuse more than a single block worth of objects.
    assert(fused_size <= GC_BLOCK_ALLOC_SIZE);

    // We own all the forwarding bits of all the objects in the fuse_queue.

    // If we only have the base object and no other object to colocate with it, just return.
    if (last == 0) {
        p_obj->set_obj_info(old_base_value); // Release the forwarding bit and don't colocate this object.
        // No objects to relocate.
        TRACE("3");
        return false;
    }

    // At this point all objects in the queue will be fused; we hold the forwarding bits,
    // so we now figure out where they will be colocated.
    gc_trace(p_obj, "Fusing this object with offspring.");
    assert((POINTER_SIZE_INT)(GC_BLOCK_INFO(to_obj + get_object_size_bytes(p_obj) - 1)) <= (POINTER_SIZE_INT)(GC_BLOCK_CEILING(to_obj)));
    assert((p_obj->get_obj_info() & FORWARDING_BIT_MASK) == FORWARDING_BIT_MASK);

    if (object_info_is_not_zero(p_obj)) {
        if ((p_obj->get_obj_info() & ~FORWARDING_BIT_MASK) != 0) {
            object_lock_save_info *obj_info = (object_lock_save_info *) STD_MALLOC(sizeof(object_lock_save_info));
            assert(obj_info);
            // Save what needs to be restored.
            obj_info->obj_header = p_obj->get_obj_info();
            obj_info->obj_header = obj_info->obj_header & ~FORWARDING_BIT_MASK; // Clear the forwarding bit.
            // Keep track of the new after-slide address.
            obj_info->p_obj = (Partial_Reveal_Object *) to_obj;
            gc_thread->insert_object_header_info_during_sliding_compaction(obj_info);
            *problem_locks = *problem_locks + 1; // The placement code does not deal with this, so this is likely to be wrong.
            gc_trace(p_obj, "Object being fused needs obj_info preserved.");
            debug_printf_trigger = true;
            INFO("preserving base fused object header");
        }
    }

    // Finally deal with the placement, moving the base object first.
    insert_object_location(gc_thread, to_obj, p_obj);
    gc_trace(to_obj, " In allocate_forwarding_pointers_for_compaction_live_objects forwarding *to* this location. (vtable not yet legal)");
    gc_trace(p_obj, " was forwarded...");
    if (verify_live_heap) {
        add_repointed_info_for_thread(p_obj, (Partial_Reveal_Object *) to_obj, gc_thread->get_id());
    }

    assert(base_obj_size == get_object_size_bytes(p_obj));
    to_obj = (void *)((POINTER_SIZE_INT) to_obj + base_obj_size);

    // Now figure out where the referent objects belong and set up their forwarding pointers.
    for (i = 0; i < last; i++) {
        Partial_Reveal_Object *p_fuse_obj = fuse_queue[i];
        unsigned int fused_obj_size = get_object_size_bytes(p_fuse_obj);
        gc_trace(p_fuse_obj, "Fusing this object with parent.");
        // Finally deal with this colocation.
        assert(p_fuse_obj != p_obj); // NULLs should have been filtered out above.

        if (object_info_is_not_zero(p_fuse_obj)) {
            if ((p_fuse_obj->get_obj_info() & ~FORWARDING_BIT_MASK) != 0) {
                object_lock_save_info *obj_info = (object_lock_save_info *) STD_MALLOC(sizeof(object_lock_save_info));
                assert(obj_info);
                // Save what needs to be restored.
                obj_info->obj_header = p_fuse_obj->get_obj_info();
                obj_info->obj_header = obj_info->obj_header & ~FORWARDING_BIT_MASK; // Clear the forwarding bit.
                // Keep track of the new after-slide address.
                obj_info->p_obj = (Partial_Reveal_Object *) to_obj;
                gc_thread->insert_object_header_info_during_sliding_compaction(obj_info);
                *problem_locks = *problem_locks + 1; // The placement code does not deal with this, so this is likely to be wrong.
                gc_trace(p_fuse_obj, "Object being fused needs obj_info preserved.");
                debug_printf_trigger = true;
                INFO("preserving fused object header");
            }
        }

        // Counts are not thread safe, but they are just an approximation.
        moved_count++;

        // The object in the queue has its forwarding bit set.
        {
            POINTER_SIZE_INT UNUSED next_available = (POINTER_SIZE_INT)to_obj + get_object_size_bytes(p_fuse_obj) - 1;
            assert((fuse_queue[i]->get_obj_info() & FORWARDING_BIT_MASK) == FORWARDING_BIT_MASK);
            assert(next_available <= ((POINTER_SIZE_INT)(GC_BLOCK_CEILING(to_obj))));
        }

        insert_object_location(gc_thread, to_obj, p_fuse_obj);
        gc_trace(to_obj, " In allocate_forwarding_pointers_for_compaction_live_objects forwarding *to* this location. (vtable not yet legal)");
        gc_trace(p_obj, " was forwarded...");
        if (verify_live_heap) {
            add_repointed_info_for_thread(p_fuse_obj, (Partial_Reveal_Object *) to_obj, gc_thread->get_id());
        }
        to_obj = (void *)((POINTER_SIZE_INT) to_obj + fused_obj_size);
    }

    *next_obj_start_arg = to_obj; // Update and return.
    TRACE("next_obj_start_arg addr: " << next_obj_start_arg << ", old_val " << debug_orig_to_obj << ", new_val " << to_obj);
    return true;
}
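/*
 * The precondition for everything fuse_objects() does is winning the race
 * for the forwarding bit. A minimal sketch of that claim using C11 atomics
 * (hypothetical; the real code uses Partial_Reveal_Object::compare_exchange
 * on the obj_info header word):
 */
#include <stdatomic.h>
#include <stdint.h>

#define FWD_CLAIM_BIT ((uintptr_t)1)

/* Returns 1 if this thread claimed the bit, 0 if another thread owns it
 * (in which case the caller must not move the object). */
static int claim_forwarding_bit(_Atomic uintptr_t *header) {
    uintptr_t old = atomic_load(header);
    if (old & FWD_CLAIM_BIT)
        return 0; /* lost the race: someone else will move this object */
    return atomic_compare_exchange_strong(header, &old, old | FWD_CLAIM_BIT);
}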
void gc_sweep(void)
{
    PObject *start = (PObject *)hbase;
    PObject *cur = start;
    PObject *prev = (PObject *)hedge;
    uint32_t flags;
    uint32_t prev_is_free = 0;

    debug("\n\n<<<<<<<SWEEP started\n");

    /* Walk the heap and build the free list. */
    hblocks = 0;
    hfblocks = 0;
    hfreemem = 0;
    while (cur != (PObject *)hedge) {
        flags = GCH_FLAG(cur);
        //debug("cur is %x with flag %i\n", cur, flags);
        switch (flags) {
            case GC_FREE:
                /*case GC_USED_UNMARKED:*/
gc_sweep_free:
                hfreemem += GCH_SIZE(cur);
                debug("sweeping %x\n", cur);
                //debug("found free block: %i (%i,%i) %i\n", cur, hblocks, hfblocks, GCH_FLAG(cur));
                if (prev_is_free && (GCH_SIZE(prev) + GCH_SIZE(cur) <= 0xffff)) {
                    /* Coalesce with the previous free block. */
                    GCH_SIZE_SET(prev, GCH_SIZE(prev) + GCH_SIZE(cur));
                    //debug("...coalesced with prev %i~%i size %i\n", prev, cur, GCH_SIZE(prev));
                    prev_is_free = (GCH_SIZE(prev) < 0xffff);
                } else {
                    prev_is_free = (GCH_SIZE(cur) < 0xffff);
                    GCH_NEXT_SET(prev, cur); /* Unset mark flags. The first time through, this sets hedge->next. */
                    prev = cur;
                    hfblocks++;
                    //debug("...split prev %i~%i size %i (%i,%i)\n", prev, cur, GCH_SIZE(prev), hblocks, hfblocks);
                }
                break;
            case GC_USED_UNMARKED:
                if (PHEADERTYPE(cur) == PSYSOBJ) {
                    if (((PSysObject *)cur)->type == PSYS_TIMER) {
                        /* If we are here, the timer is not in timers and not reachable: free its VOS memory. */
                        _gc_free(((PSysObject *)cur)->sys.timer.vtm);
                    } else if (((PSysObject *)cur)->type == PSYS_SEMAPHORE) {
                        /* If it's a lost semaphore, still active, anything can happen :-o */
                        _gc_free(((PSysObject *)cur)->sys.sem);
                    }
                }
                goto gc_sweep_free;
            case GC_USED_MARKED:
                prev_is_free = 0;
                GCH_FLAG_SET(cur, GC_USED);
                hblocks++;
                //debug("found marked block: %i (%i,%i) %i\n", cur, hblocks, hfblocks, GCH_FLAG(cur));
                break;
            case GC_STAGED:
                /* Don't touch flags. */
                prev_is_free = 0;
                hblocks++;
                //debug("found staged block: %i (%i,%i) %i\n", cur, hblocks, hfblocks, GCH_FLAG(cur));
                break;
        }
        /* Skip to the next block. */
        cur = (PObject *)(((uint8_t *)(cur)) + GCH_SIZE(cur));
    }

    if (prev_is_free) {
        /* Coalesce with hedge. */
        hfreemem -= GCH_SIZE(prev);
        GCH_NEXT_SET(prev, GCH_NEXT(hedge));
        hedge = (uint8_t *)prev;
        //debug("coalesced with hedge\n");
    } else {
        GCH_NEXT_SET(prev, hedge);
        //debug("not coalesced with hedge\n");
        hfblocks++;
    }
    debug("S%i\n", hfreemem);
    hfreemem += ((uint32_t)hend - (uint32_t)hedge);
    debug("S%i\n", hfreemem);
    GCH_SIZE_SET(PNT(hedge), MIN(0xffff, hfreemem));
    gc_trace();
    debug("<<<<<<<<SWEEP stopped (%i,%i)\n\n\n", hblocks, hfblocks);
}
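/*
 * The sweep's coalescing rule in isolation: two adjacent free blocks merge
 * only while the combined size still fits the 16-bit size field, which is
 * why gc_sweep() checks against 0xffff before and after each merge.
 * Hypothetical header layout (the real code packs size/flags via the GCH_*
 * macros):
 */
#include <stdint.h>

typedef struct { uint16_t size; uint16_t flags; } blk_hdr;

/* Merge cur into prev if the sum fits in 16 bits; return 1 on success. */
static int try_coalesce(blk_hdr *prev, const blk_hdr *cur) {
    uint32_t merged = (uint32_t)prev->size + cur->size;
    if (merged > 0xffff)
        return 0; /* would overflow the size field: keep the blocks split */
    prev->size = (uint16_t)merged;
    return 1;
}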
//TODO: remove the stack and make a list of used/marked via header!! phead vs ptail
void gc_mark()
{
    int i;
    PObject *obj;

    debug("\n\n>>>>>>>MARK started %x\n", _phead);
    gc_trace();
    gc_keep_root();

    while (_phead) {
        obj = _phead;
        _phead = GCH_NEXT(obj);
        int tt = PTYPE(obj);
        switch (tt) {
            case PBYTEARRAY:
            case PSHORTARRAY:
                if (_PMS(obj)->seq)
                    GC_MARK(_PMS(obj)->seq);
                break;
            case PLIST:
            case PTUPLE: {
                i = PSEQUENCE_ELEMENTS(obj);
                PObject **objs = PSEQUENCE_OBJECTS(obj);
                if (objs) {
                    GC_KEEP_MANY(objs, i);
                    if (PSEQUENCE_BUFFER(obj)) {
                        GC_MARK(PSEQUENCE_BUFFER(obj));
                    }
                }
            }
            break;
            case PFSET:
            case PSET:
            case PDICT: {
                PDict *tmp = (PDict *)obj;
                int e = 0;
                HashEntry *ee;
                while ((ee = phash_getentry(tmp, e++)) != NULL) {
                    GC_KEEP_NOTNULL(ee->key);
                    if (tt == PDICT)
                        GC_KEEP_NOTNULL(ee->value);
                }
                if (tmp->entry)
                    GC_MARK(tmp->entry);
            }
            break;
            case PFUNCTION: {
                PFunction *tmp = (PFunction *)obj;
                if (tmp->defargs)
                    GC_KEEP_MANY(tmp->storage, tmp->defargs);
                //TODO: the following can be optimized by avoiding the check on names...
                //and use macros to access function fields for portability...
                if (tmp->defkwargs)
                    GC_KEEP_MANY(tmp->storage + tmp->defargs, 2 * tmp->defkwargs);
                if (PCODE_CELLVARS(PCODE_MAKE(tmp->codeobj))) {
                    obj = (PObject *)PFUNCTION_GET_CLOSURE(tmp);
                    GC_KEEP(obj);
                    //if (obj)
                    //    gc_keep_cells((PTuple *)obj);
                }
            }
            break;
            case PMETHOD: {
                PMethod *tmp = (PMethod *)obj;
                GC_KEEP(tmp->self);
                GC_KEEP(tmp->fn);
            }
            break;
            case PCLASS: {
                PClass *tmp = (PClass *)obj;
                GC_KEEP(tmp->bases);
                GC_KEEP(tmp->dict);
            }
            break;
            case PINSTANCE: {
                PInstance *tmp = (PInstance *)obj;
                GC_KEEP(tmp->base);
                GC_KEEP(tmp->dict);
            }
            break;
            case PITERATOR:
                GC_KEEP(((PIterator *)obj)->iterable);
                break;
            case PFRAME: {
                //debug("checking frame %i\n", obj);
                PFrame *tmp = (PFrame *)obj;
                PCode *code = PCODE_MAKE(tmp->code);
                GC_KEEP(tmp->parent);
                //GC_KEEP(tmp->block);
                GC_KEEP_MANY(PFRAME_PSTACK(tmp), tmp->sp);
                GC_KEEP_MANY(PFRAME_PLOCALS(tmp, code), code->nlocals);
                if (PCODE_HASVARS(code))
                    GC_KEEP_MANY(PFRAME_VARS(tmp, code), 2);
                /*if (PCODE_FREEVARS(code)) {
                    obj = PFRAME_TFREEVARS(tmp, code); //(PObject *)PFRAME_FREEVARS(tmp);
                    debug("keeping freevars %x for %x\n", obj, tmp);
                    GC_KEEP(obj);
                }
                debug("FRAME %x %i %i\n", tmp, PCODE_CELLVARS(code), PCODE_FREEVARS(code));
                if (PCODE_CELLVARS(code)) {
                    obj = (PObject *)PFRAME_CELLVARS(tmp);
                    debug("keeping cells for %x\n", obj);
                    GC_KEEP(obj);
                    //if (obj)
                    //    gc_keep_cells((PTuple *)obj);
                }*/
            }
            break;
            /*case PBLOCK:
                //debug("checking block %i\n", obj);
                GC_KEEP(((PBlock *)obj)->next);
                break;
            */
            case PSYSOBJ: {
                PSysObject *tmp = (PSysObject *)obj;
                SYSLOCK();
                if (tmp->type == PSYS_TIMER) {
                    GC_KEEP(tmp->sys.timer.fn.fn);
                    GC_KEEP(tmp->sys.timer.fn.args);
                }
                SYSUNLOCK();
            }
            break;
            default:
                break;
        }
        // No need to mark obj: it has already been marked in gc_keep.
        //GC_MARK(obj);
    }

    gc_trace();
    _phead = NULL;

    obj = PNT(_vm.thlist);
    do {
        PThread *pth = (PThread *)obj;
        debug("Scanning thread %x %i for consts %x %i\n", pth, pth->header.flags, pth->cocache, pth->cachesize);
        for (i = 0; i < pth->cachesize; i++) {
            PObject *co = pth->cocache[i].obj;
            if (co && GCH_FLAG(co) != GC_USED)
                continue;
            // Not marked, not staged, not null, not const-marked: remove it.
            pth->cocache[i].obj = NULL;
        }
        obj = PNT(pth->next);
    } while (obj != PNT(_vm.thlist));

    debug(">>>>>>>MARK stopped\n\n\n");
}
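/*
 * gc_mark() above uses an intrusive worklist: gc_keep*() links a reachable
 * object into the _phead list through its header, and the loop pops and
 * scans one object per iteration. A minimal sketch of that discipline with
 * a hypothetical header layout (the real code packs the link into the
 * GCH_* header fields and scans by object type):
 */
typedef struct iobj {
    struct iobj *gc_next; /* worklist link threaded through the header */
    int marked;
    struct iobj *children[2];
} iobj;

static iobj *worklist = 0;

/* keep: mark and push in one step, so an object is queued at most once
 * (this is why the loop above never needs to re-mark the popped object). */
static void keep(iobj *o) {
    if (o && !o->marked) {
        o->marked = 1;
        o->gc_next = worklist;
        worklist = o;
    }
}

static void mark_all(iobj *root) {
    keep(root);
    while (worklist) {
        iobj *o = worklist;
        worklist = o->gc_next;
        for (int i = 0; i < 2; i++)
            keep(o->children[i]); /* scan: queue the reachable children */
    }
}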