// Emits a stub that bulk-allocates pairs of 2-word objects (mark + klass) of the
// klass passed on the stack, repeating `count` times. Stack layout on entry:
//   [esp + 2*oopSize] = klass, [esp + 1*oopSize] = count (tagged small int).
// Returns the entry point of the generated code.
char* PrimitivesGenerator::inline_allocation() {
  Address klass_addr = Address(esp, +2 * oopSize);
  Address count_addr = Address(esp, +1 * oopSize);
  Label need_scavenge1, fill_object1, need_scavenge2, fill_object2, loop, loop_test, exit;
  int size = 2;                                  // each allocated object is 2 oops: mark + klass
  char* entry_point = masm->pc();
  masm->movl(ebx, klass_addr);
  masm->movl(edx, count_addr);
  masm->testl(edx, 1);                           // bail out unless the count has the expected tag bit pattern
  masm->jcc(Assembler::notEqual, exit);
  masm->sarl(edx, 3);                            // strip tag / scale: edx = number of loop iterations
  // NOTE(review): if edx is 0 here, the decl/jcc(notEqual) loop below wraps
  // through -1 and iterates ~2^32 times — confirm callers guarantee count > 0.
  masm->bind(loop);
  // First allocation of the pair.
  test_for_scavenge(eax, size * oopSize, need_scavenge1);
  masm->bind(fill_object1);
  masm->movl(Address(eax, (-size+0)*oopSize), 0x80000003); // obj->init_mark()
  masm->movl(Address(eax, (-size+1)*oopSize), ebx);        // obj->set_klass(klass)
  masm->subl(eax, (size * oopSize) - 1);                   // rewind to object start, +1 presumably tags it as an oop — confirm
  // Second allocation of the pair, built in ecx.
  test_for_scavenge(ecx, size * oopSize, need_scavenge2);
  masm->bind(fill_object2);
  masm->movl(Address(ecx, (-size+0)*oopSize), 0x80000003); // obj->init_mark()
  masm->movl(Address(ecx, (-size+1)*oopSize), ebx);        // obj->set_klass(klass)
  masm->subl(ecx, (size * oopSize) - 1);
  //masm->jmp(loop);
  // Falls through to loop_test: a direct jmp(loop) would skip the counter decrement.
  masm->bind(loop_test);
  masm->decl(edx);
  masm->jcc(Assembler::notEqual, loop);
  masm->bind(exit);
  masm->ret(2 * oopSize);                        // pop klass + count
  // Slow path for the first allocation: spill live registers around the scavenge.
  masm->bind(need_scavenge1);
  masm->pushl(ebx);
  masm->pushl(edx);
  scavenge(size);
  masm->popl(edx);
  masm->popl(ebx);
  masm->jmp(fill_object1);
  // Slow path for the second allocation: scavenge leaves the result in eax,
  // so copy it into ecx before resuming.
  masm->bind(need_scavenge2);
  masm->pushl(ebx);
  masm->pushl(edx);
  scavenge(size);
  masm->movl(ecx, eax);
  masm->popl(edx);
  masm->popl(ebx);
  masm->jmp(fill_object2);
  return entry_point;
}
/* Scavenge every heap reference held by a component object: the four fixed
 * reference fields plus each entry of its constant table. Returns the
 * component's length so the caller can step to the next object. */
static int scav_component(struct object *ptr)
{
    struct component *comp = (struct component *)ptr;

    scavenge(&comp->debug_name);
    scavenge(&comp->mtime);
    scavenge(&comp->source_file);
    scavenge(&comp->debug_info);

    for (int n = 0; n < comp->n_constants; n++)
        scavenge(&comp->constant[n]);

    return comp->length;
}
/* Scavenge the per-file-descriptor event references of a waiters table,
 * one slot for every possible descriptor (0 .. FD_SETSIZE-1). */
static void scav_waiters(struct waiters *waiters)
{
    int fd = 0;
    while (fd < FD_SETSIZE) {
        scavenge(&waiters->events[fd]);
        fd++;
    }
}
// Emits a stub that allocates a context object with n slots.
// Object layout (size = n + 3 oops): mark word (with the slot count encoded),
// klass, home (NULL), then n slots initialized to nil.
// Returns the entry point of the generated code.
char* PrimitivesGenerator::allocateContext(int n) {
  Label need_scavenge, fill_object;
  int size = n + 3;                              // mark + klass + home + n slots
  char* entry_point = masm->pc();
  test_for_scavenge(eax, size * oopSize, need_scavenge);
  masm->bind(fill_object);
  masm->movl(ebx, contextKlass_addr());
  masm->movl(ecx, nil_addr());
  // Mark word: base mark value plus (n+1)*4, presumably encoding the object
  // size in the mark — confirm against the mark-word format.
  masm->movl(Address(eax, (-size+0)*oopSize), 0x80000003 + ((n+1) * 4));// obj->init_mark()
  masm->movl(Address(eax, (-size+1)*oopSize), ebx); // obj->set_klass(klass)
  masm->movl(Address(eax, (-size+2)*oopSize), 0); // obj->set_home(NULL)
  for (int i = 0; i < n; i++) {
    masm->movl(Address(eax, (-size+3+i)*oopSize), ecx); // obj->obj_at_put(i,nilObj)
  }
  masm->subl(eax, size * oopSize - 1);           // rewind to object start, +1 presumably tags it as an oop — confirm
  masm->ret(0);
  // Slow path: run the scavenger, then retry filling the object.
  masm->bind(need_scavenge);
  scavenge(size);
  masm->jmp(fill_object);
  return entry_point;
}
/* Allocate `size` payload bytes plus a one-word header from the GC'd heap.
 * Small requests come from size-class pools; large ones from whole pages.
 * The descriptor `desc` is stored in the header word; the returned pointer
 * is one word past it (the payload). `sp` is the stack pointer handed to
 * the scavenger so it can find roots. */
void *gc_alloc(value *desc, unsigned size, value *sp)
{
    unsigned alloc_size;
    word *p = NULL;
    header *h;

    if (debug['z'])
        gc_collect(sp);                 /* stress mode: collect before every allocation */

    /* +4 reserves the header word; round to the machine word size. */
    size = round_up(size+4, BYTES_PER_WORD);

    if (size <= MAX_SMALL_BYTES) {
        /* Try to allocate from the appropriate pool */
        unsigned index = pool_map(size);
        alloc_size = pool_size(index);
        ASSERT(alloc_size >= size);
        if (free_count[index] == 0) {
            /* Scavenge until there is room for another pool block (or a free
               cell shows up); if that fails, grow the pool. */
            while (pool_total + pool_block(index) > heap_size
                   && free_count[index] == 0)
                scavenge(sp, pool_block(index));
            if (free_count[index] == 0)
                add_block(index);
        }
        /* Bump-allocate the next free cell of this size class. */
        p = (word *) free_ptr[index];
        free_ptr[index] += alloc_size;
        free_count[index]--;
    } else {
        /* Allocate whole pages */
        alloc_size = round_up(size, GC_PAGESIZE);
        while (pool_total + alloc_size > heap_size)
            scavenge(sp, alloc_size);
        h = find_block(alloc_size, alloc_size);
        insert(block_pool[n_sizes], h);
        pool_total += alloc_size;
        p = (word *) h->h_memory;
    }

    alloc_since_gc += alloc_size;
    DEBUG_PRINT('c', ("[Alloc %d %p]", size, p));
    *p = (word) desc;                   /* header word precedes the payload */
    return p+1;
}
/*
 * RUN THE COLLECTOR
 * args
 *   Stack* s  -- local variable stack (GC roots)
 *   heap** h  -- current (pointer to pointer to) from-space; replaced with
 *                the new to-space before returning
 *   int bd    -- are we collecting the BigData heap? (bool: 0 false, otherwise true)
 *
 * Copying collection: evacuate everything reachable from the stack into a
 * fresh to-space, scavenge it, resolve weak pointers, then install the
 * to-space as the current heap. Periodically also collects the BigData heap.
 */
void collect(Stack* s, heap** h, int bd){
    heap* to = heapCreate();
    weakptrs = stackCreate();

    /* If stack is empty, return empty heap (nothing is live). */
    /* NOTE(review): the old from-space *h is not freed on this path or on the
       bd early-return below — confirm it is released elsewhere, otherwise
       both paths leak the old heap. */
    if (!s) {
        *h = to;
        return;
    }

    /* Evacuate things pointed to from the stack, update stack references.
       (fix: explicit comparison — was `while(s=s->next)`, an easy-to-misread
       assignment-as-condition.) */
    do {
        s->data = evac(s->data, *h, to);
    } while ((s = s->next) != NULL);

    /* If collecting the bigdata heap, no need to scavenge. */
    if (bd) {
        *h = to;
        return;
    }

    /* Scavenge the to-space. */
    scavenge(*h, to);

    /*
     * Check weak pointer references. If data is already forwarded, we know
     * it's got another reference -- copy the forwarding pointer loc.
     * Otherwise, we collect the data.
     */
    int i;
    while ((i = pop(&weakptrs)) != -1) {
        if ((*h)->heap[to->heap[i+1]] == FWD) {
            to->heap[i+1] = (*h)->heap[to->heap[i+1]+1];
        } else {
            to->heap[i+1] = -1;   /* weak target died: mark reference dead */
        }
    }

    /* Change reference of from-space to to-space. */
    free(*h);
    *h = to;

    /* Every BIGDATACOLLECT-th collection, also collect the big data heap and
       patch the main heap's bigdata references from the index stack. */
    if (!(collectioncount % BIGDATACOLLECT)) {
        if (!bigdataindex)
            return;
        collect(bigdataindex, &bigdataheap, 1);
        Stack* bdi = bigdataindex;
        do {
            (*h)->heap[bdi->bdloc] = bdi->data;
        } while ((bdi = bdi->next) != NULL);
    }
}
/* Advance the simulation one turn for `bum`, then recurse down the linked
 * list of bums via bum->next. Turns are only taken while both world->A and
 * world->B are non-NULL (presumably an "is the world still running" check —
 * confirm). */
void next_turn(World *world, Bum *bum) {
    if (world->A != NULL && world->B != NULL) {
        buy_food(bum);
        scavenge(bum);
        /* ~30% chance of taking 25-32 damage this turn. */
        if (rand() % 100 > 70) {
            bum->health -= (rand() % 8) + 25;
        }
        buy_booze(bum);
        heal_wounds(bum);
        cure_addiction(bum);
        bum->hunger += rand() % 10 + 10;   /* hunger rises 10-19 per turn */
        if (bum->hunger > 100 || bum->health < 0 || bum->addiction > 100) {
            bury_bum(world, bum, bum->id);
        }
        /* NOTE(review): if bury_bum() frees or unlinks `bum`, the reads of
           bum->next below are a use-after-free — confirm bury_bum's
           ownership semantics. */
        if (bum->next != NULL) {
            next_turn(world, bum->next);
        }
    }
}
// Emits the stub for primitiveNew: allocates an object of the klass passed at
// [esp + 2*oopSize] with n nil-initialized slots (size = n + 2 oops: mark,
// klass, slots). Includes a debugger hook: if the global `stop` flag is set,
// the stub executes int3 before filling the object.
// Returns the entry point of the generated code.
char* PrimitivesGenerator::primitiveNew(int n) {
  Address klass_addr = Address(esp, +2 * oopSize);
  Label need_scavenge, fill_object;
  int size = n+2;                                // mark + klass + n slots
  // %note: it looks like the compiler assumes we spill only eax/ebx here -Marc 04/07
  char* entry_point = masm->pc();
  // NOTE(review): the slow-path target here is `allocation_failure` (declared
  // elsewhere), yet the local `need_scavenge` label is bound below with a
  // scavenge-and-retry sequence that nothing ever jumps to. Either the dead
  // need_scavenge block should be removed or this was meant to target it, as
  // in allocateContext/allocateBlock — confirm which.
  test_for_scavenge(eax, size * oopSize, allocation_failure);
  Address _stop = Address((int)&stop, relocInfo::external_word_type);
  Label _break, no_break;
  masm->bind(fill_object);
  // Debugger hook: trap (int3) when the external `stop` word is non-zero.
  masm->movl(ebx, _stop);
  masm->testl(ebx, ebx);
  masm->jcc(Assembler::notEqual, _break);
  masm->bind(no_break);
  masm->movl(ebx, klass_addr);
  masm->movl(Address(eax, (-size+0)*oopSize), 0x80000003); // obj->init_mark()
  masm->movl(Address(eax, (-size+1)*oopSize), ebx);        // obj->set_klass(klass)
  if (n>0) {
    masm->movl(ebx, nil_addr());
    for (int i = 0; i < n; i++) {
      masm->movl(Address(eax, (-size+2+i)*oopSize), ebx); // obj->obj_at_put(i,nilObj)
    }
  }
  masm->subl(eax, (size * oopSize) - 1);         // rewind to object start, +1 presumably tags it as an oop — confirm
  masm->ret(2 * oopSize);                        // pop klass + receiver
  masm->bind(_break);
  masm->int3();
  masm->jmp(no_break);
  masm->bind(need_scavenge);
  scavenge(size);
  masm->jmp(fill_object);
  return entry_point;
}
char* PrimitivesGenerator::allocateBlock(int n) { klassOopDesc** block_klass; switch (n) { case 0 : block_klass = &::zeroArgumentBlockKlassObj; break; case 1 : block_klass = &::oneArgumentBlockKlassObj; break; case 2 : block_klass = &::twoArgumentBlockKlassObj; break; case 3 : block_klass = &::threeArgumentBlockKlassObj; break; case 4 : block_klass = &::fourArgumentBlockKlassObj; break; case 5 : block_klass = &::fiveArgumentBlockKlassObj; break; case 6 : block_klass = &::sixArgumentBlockKlassObj; break; case 7 : block_klass = &::sevenArgumentBlockKlassObj; break; case 8 : block_klass = &::eightArgumentBlockKlassObj; break; case 9 : block_klass = &::nineArgumentBlockKlassObj; break; } Address block_klass_addr = Address((int)block_klass, relocInfo::external_word_type); Label need_scavenge, fill_object; char* entry_point = masm->pc(); test_for_scavenge(eax, 4 * oopSize, need_scavenge); masm->bind(fill_object); masm->movl(ebx, block_klass_addr); masm->movl(Address(eax, -4*oopSize), 0x80000003); // obj->init_mark() masm->movl(Address(eax, -3*oopSize), ebx); // obj->set_klass(klass) // masm->movl(Address(eax, -2*oopSize), 0); // obj->set_method(NULL) // masm->movl(Address(eax, -1*oopSize), 0); // obj->set_lexical_scope(NULL) masm->subl(eax, (4 * oopSize) - 1); masm->ret(0); masm->bind(need_scavenge); scavenge(4); masm->jmp(fill_object); return entry_point; }
// Destructor: delegates all teardown to scavenge() — presumably returning
// this allocator's cached memory to the heap; confirm against scavenge().
Allocator::~Allocator() { scavenge(); }
// Destructor: delegates all teardown to scavenge() — presumably flushing
// this deallocator's pending frees; confirm against scavenge().
Deallocator::~Deallocator() { scavenge(); }
// Runs one scavenge pass under the per-process heap lock, passing the lock
// and the configured sleep duration through to scavenge() (which presumably
// may drop the lock while sleeping — confirm against scavenge()).
void Heap::concurrentScavenge()
{
    std::unique_lock<StaticMutex> lock(PerProcess<Heap>::mutex());
    scavenge(lock, scavengeSleepDuration);
}
/* Scavenge a single closure at p: evacuate every pointer it contains,
 * dispatching on the closure's info-table type. Mutable closures have their
 * info pointer flipped between the DIRTY and CLEAN variants depending on
 * whether any evacuation failed (gct->failed_to_evac), and eager promotion
 * is temporarily disabled while scavenging them. Returns rtsTrue if
 * something failed to evacuate (the closure must stay on the mutable list),
 * rtsFalse otherwise; gct->failed_to_evac is reset before returning. */
static rtsBool scavenge_one(StgPtr p)
{
    const StgInfoTable *info;
    rtsBool no_luck;
    rtsBool saved_eager_promotion;

    /* Save so each mutable case can restore after clearing it. */
    saved_eager_promotion = gct->eager_promotion;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl((StgClosure *)p);

    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        StgMVar *mvar = ((StgMVar *)p);
        gct->eager_promotion = rtsFalse;
        evacuate((StgClosure **)&mvar->head);
        evacuate((StgClosure **)&mvar->tail);
        evacuate((StgClosure **)&mvar->value);
        gct->eager_promotion = saved_eager_promotion;
        if (gct->failed_to_evac) {
            mvar->header.info = &stg_MVAR_DIRTY_info;
        } else {
            mvar->header.info = &stg_MVAR_CLEAN_info;
        }
        break;
    }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        /* Evacuate each pointer word of the thunk's payload. */
        StgPtr q, end;
        end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
        break;
    }

    case FUN:
    case FUN_1_0: // hardly worth specialising these guys
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case WEAK:
    case PRIM:
    case IND_PERM:
    {
        /* Generic pointers-first layout: evacuate each pointer word. */
        StgPtr q, end;
        end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
        break;
    }

    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    {
        StgPtr q = p;
        gct->eager_promotion = rtsFalse;
        evacuate(&((StgMutVar *)p)->var);
        gct->eager_promotion = saved_eager_promotion;
        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
        }
        break;
    }

    case BLOCKING_QUEUE:
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;
        gct->eager_promotion = rtsFalse;
        evacuate(&bq->bh);
        evacuate((StgClosure**)&bq->owner);
        evacuate((StgClosure**)&bq->queue);
        evacuate((StgClosure**)&bq->link);
        gct->eager_promotion = saved_eager_promotion;
        if (gct->failed_to_evac) {
            bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
        } else {
            bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
        }
        break;
    }

    case THUNK_SELECTOR:
    {
        StgSelector *s = (StgSelector *)p;
        evacuate(&s->selectee);
        break;
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;
        evacuate(&ap->fun);
        /* The payload is a frozen stack chunk: walk it like a stack. */
        scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        p = (StgPtr)ap->payload + ap->size;
        break;
    }

    case PAP:
        p = scavenge_PAP((StgPAP *)p);
        break;

    case AP:
        p = scavenge_AP((StgAP *)p);
        break;

    case ARR_WORDS:
        // nothing to follow
        break;

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    {
        // We don't eagerly promote objects pointed to by a mutable
        // array, but if we find the array only points to objects in
        // the same or an older generation, we mark it "clean" and
        // avoid traversing it during minor GCs.
        gct->eager_promotion = rtsFalse;
        scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
        if (gct->failed_to_evac) {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
        } else {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
        }
        gct->eager_promotion = saved_eager_promotion;
        gct->failed_to_evac = rtsTrue;
        break;
    }

    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        // follow everything
        scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
        // If we're going to put this object on the mutable list, then
        // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
        if (gct->failed_to_evac) {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
        } else {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
        }
        break;
    }

    case TSO:
    {
        scavengeTSO((StgTSO*)p);
        break;
    }

    case STACK:
    {
        StgStack *stack = (StgStack*)p;
        gct->eager_promotion = rtsFalse;
        scavenge_stack(stack->sp, stack->stack + stack->stack_size);
        stack->dirty = gct->failed_to_evac;
        gct->eager_promotion = saved_eager_promotion;
        break;
    }

    case MUT_PRIM:
    {
        StgPtr end;
        gct->eager_promotion = rtsFalse;
        end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
            evacuate((StgClosure **)p);
        }
        gct->eager_promotion = saved_eager_promotion;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TREC_CHUNK:
    {
        StgWord i;
        StgTRecChunk *tc = ((StgTRecChunk *) p);
        TRecEntry *e = &(tc -> entries[0]);
        gct->eager_promotion = rtsFalse;
        evacuate((StgClosure **)&tc->prev_chunk);
        for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
            evacuate((StgClosure **)&e->tvar);
            evacuate((StgClosure **)&e->expected_value);
            evacuate((StgClosure **)&e->new_value);
        }
        gct->eager_promotion = saved_eager_promotion;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case IND:
        // IND can happen, for example, when the interpreter allocates
        // a gigantic AP closure (more than one block), which ends up
        // on the large-object list and then gets updated. See #3424.
    case BLACKHOLE:
    case IND_STATIC:
        evacuate(&((StgInd *)p)->indirectee);

#if 0 && defined(DEBUG)
      if (RtsFlags.DebugFlags.gc)
      /* Debugging code to print out the size of the thing we just
       * promoted
       */
      {
        StgPtr start = gen->scan;
        bdescr *start_bd = gen->scan_bd;
        nat size = 0;
        scavenge(&gen);
        if (start_bd != gen->scan_bd) {
          size += (P_)BLOCK_ROUND_UP(start) - start;
          start_bd = start_bd->link;
          while (start_bd != gen->scan_bd) {
            size += BLOCK_SIZE_W;
            start_bd = start_bd->link;
          }
          size += gen->scan -
            (P_)BLOCK_ROUND_DOWN(gen->scan);
        } else {
          size = gen->scan - start;
        }
        debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
      }
#endif
      break;

    default:
        barf("scavenge_one: strange object %d", (int)(info->type));
    }

    /* Report whether anything failed to evacuate, then clear the flag for
       the next caller. */
    no_luck = gct->failed_to_evac;
    gct->failed_to_evac = rtsFalse;
    return (no_luck);
}
// Convenience overload: scavenges `heap` using this collector's cs/ds
// members as the remaining arguments to the full scavenge() overload.
void GarbageCollector::scavenge(Heap * heap){
  scavenge(heap, cs, ds);
}