/* Handle overflow of the current pool segment of remembered set RS.
 *
 * RECORDED is the number of entries recorded since statistics were last
 * updated; POOLTOP is the final top pointer for the now-full segment.
 * Advances curr_pool to the next segment, reusing a segment from the
 * global recycled_pool free list when one is available and allocating a
 * fresh segment otherwise.
 */
static void handle_overflow( remset_t *rs, unsigned recorded, word *pooltop )
{
  DATA(rs)->stats.recorded += recorded;
  rs->live += recorded;
  DATA(rs)->curr_pool->top = pooltop;

  annoyingmsg( "Remset @0x%p overflow, entries=%d", (void*)rs, rs->live);

  rs->has_overflowed = TRUE;
  if (DATA(rs)->curr_pool->next == 0) {
    if ( recycled_pool == NULL ) {
      DATA(rs)->curr_pool->next =
        allocate_pool_segment( DATA(rs)->pool_entries,
                               DATA(rs)->mem_attribute );
    }
    else {
      /* Pop the head of the recycled list.  BUGFIX: the head must be
       * unlinked (recycled_pool = p->next) BEFORE p->next is cleared;
       * the previous code zeroed p->next first and then read it, which
       * truncated recycled_pool to NULL and leaked every remaining
       * recycled segment.
       */
      pool_t* p = recycled_pool;
      recycled_pool = p->next;
      p->top = p->bot;          /* reset the segment to empty */
      p->next = 0;
      DATA(rs)->curr_pool->next = p;
    }
    DATA(rs)->numpools++;
  }
  DATA(rs)->curr_pool = DATA(rs)->curr_pool->next;
  assert( DATA(rs)->curr_pool != 0 );
}
/* C_wb_compact: some SSB filled up, and must be compacted.
 *
 * Marks the syscall as non-interruptible for the duration of the
 * compaction.  Remset overflows during compaction are ignored here.
 */
void C_wb_compact( int generation )
{
  annoyingmsg( "Generation %d: SSB filled up during mutator operation.",
               generation );

  in_noninterruptible_syscall = 1;
  gc_compact_all_ssbs( the_gc( globals ) );
  in_noninterruptible_syscall = 0;
}
static remset_t * create_labelled_remset_with_owner_attrib ( int tbl_entries, /* size of hash table, 0=default */ int pool_entries, /* size of remset, 0 = default */ int major_id, /* for stats */ int minor_id, /* for stats */ unsigned owner_attrib ) { word *heapptr; remset_t *rs; remset_data_t *data; pool_t *p; assert( tbl_entries >= 0 && (tbl_entries == 0 || ilog2( tbl_entries ) != -1)); assert( pool_entries >= 0 ); if (pool_entries == 0) pool_entries = DEFAULT_REMSET_POOLSIZE; if (tbl_entries == 0) tbl_entries = DEFAULT_REMSET_TBLSIZE; annoyingmsg( "Allocated remembered set\n hash=%d pool=%d", tbl_entries, pool_entries ); rs = (remset_t*)must_malloc( sizeof( remset_t ) ); data = (remset_data_t*)must_malloc( sizeof( remset_data_t ) ); while(1) { heapptr = gclib_alloc_rts( tbl_entries*sizeof(word), owner_attrib ); if (heapptr != 0) break; memfail( MF_RTS, "Can't allocate table and SSB for remembered set." ); } /* Hash table */ data->tbl_bot = heapptr; heapptr += tbl_entries; data->tbl_lim = heapptr; /* Node pool */ p = allocate_pool_segment( pool_entries, data->mem_attribute ); /* XXX */ data->first_pool = data->curr_pool = p; assert( data->curr_pool != 0 ); data->numpools = 1; /* Misc */ memset( &data->stats, 0, sizeof( data->stats )); data->pool_entries = pool_entries; data->self = stats_new_remembered_set( major_id, minor_id ); data->mem_attribute = owner_attrib; rs->live = 0; rs->has_overflowed = FALSE; rs->data = data; rs_clear( rs ); return rs; }
/* NOTE: A copy of this code exists in Sparc/memory.s; if you change
 * anything here, check that code as well.
 *
 * stk_restore_frame: copy the topmost heap continuation frame (from
 * globals[G_CONT]) back onto the stack cache, converting it from its
 * heap representation to its stack representation.
 *
 * Returns 1 on success, 0 if there is not enough room below G_ETOP to
 * hold the frame (in which case no state is modified).
 *
 * Precondition: the stack cache is empty (G_STKP == G_STKBOT).
 */
int stk_restore_frame( word *globals )
{
  word *stktop, *hframe, *p;
  word retoffs, proc, codeaddr, codeptr, header;
  unsigned size;

  assert2(globals[ G_STKP ] == globals[ G_STKBOT ]);

  hframe = ptrof( globals[ G_CONT ] );
  size = roundup8( sizefield( *hframe ) + 4 ); /* bytes to copy */

  /* Reserve room on the stack; words are 4 bytes here (hence size/4). */
  stktop = (word*)globals[ G_STKP ];
  stktop -= size / 4;
  if (stktop < (word*)globals[ G_ETOP ]) {
    supremely_annoyingmsg( "Failed to create stack." );
    return 0;
  }
  globals[ G_STKP ] = (word)stktop;
  globals[ G_STKUFLOW ] += 1;
#if 0
  annoyingmsg("Restore: %d", size);
#endif

  /* copy the frame onto the stack */
  /* Two words per iteration; size is a multiple of 8 via roundup8. */
  p = stktop;
  while (size) {
    *p++ = *hframe++;
    *p++ = *hframe++;
    size -= 8;
  }

  /* Follow continuation chain. */
  globals[ G_CONT ] = *(stktop+STK_DYNLINK);

  header = *(stktop+HC_HEADER);
  retoffs = *(stktop+HC_RETOFFSET);
  proc = *(stktop+HC_PROC);

  /* convert the header back to a fixnum */
  *(stktop+STK_CONTSIZE) = sizefield(header);

  /* convert the return address: in the heap frame it is stored as an
   * offset relative to the procedure's code vector (when the code slot
   * is a bytevector); on the stack it must be an absolute address.
   * The '+4' presumably skips the bytevector header — mirrors stk_flush.
   */
  if (proc != 0) {
    codeptr = *(ptrof( proc )+PROC_CODEPTR);
    if (tagof( codeptr ) == BVEC_TAG) {
      codeaddr = (word)ptrof( codeptr );
      *(stktop+STK_RETADDR) = (codeaddr+4)+retoffs;
    }
    else {
      /* Code slot is not a bytevector: offset is already absolute. */
      *(stktop+STK_RETADDR) = retoffs;
    }
  }
  else {
    *(stktop+STK_RETADDR) = retoffs;
  }
  return 1;
}
/* Create an extensible bitmap representing the empty set of addresses.
 *
 * leaf_bytes:       bytes per leaf bitmap (must be a multiple of
 *                   MIN_BYTES_PER_OBJECT)
 * entries_per_node: fan-out of each interior tree node
 */
extbmp_t *create_extensible_bitmap_params( gc_t *gc, gc_param_t *info,
                                           int leaf_bytes,
                                           int entries_per_node )
{
  extbmp_t *bm;
  int tree_depth;
  int max_leaf_count;
  int words_per_leaf = CEILDIV( leaf_bytes, sizeof(word) );
  long long covered_words;

  assert( (leaf_bytes % MIN_BYTES_PER_OBJECT) == 0 );

  max_leaf_count = SHIFTED_ADDRESS_SPACE / (BITS_PER_WORD * words_per_leaf);
  assert( max_leaf_count > 0 );

  {
    /* Determine tree depth: widen the fan-out level by level until a
     * single tree of inodes can reach every potential leaf; track the
     * word-address range the resulting root covers.
     */
    int level = 0;
    long long leaves = 1;
    long long span = ADDRS_PER_WORD * words_per_leaf;
    while (leaves < max_leaf_count) {
      level += 1;
      leaves *= entries_per_node;
      span *= entries_per_node;
    }
    tree_depth = level;
    covered_words = span;
  }

  bm = (extbmp_t*)must_malloc( sizeof( extbmp_t ));
  bm->gc = gc;
  bm->leaf_words = words_per_leaf;
  bm->entries_per_inode = entries_per_node;
  bm->depth = tree_depth;
  bm->tree = alloc_inode( bm, covered_words, 0,
                          (((long long)SHIFTED_ADDRESS_SPACE) << BIT_IDX_SHIFT));

  /* MRU cache starts out empty. */
  bm->mru_cache.leaf = NULL;
  bm->mru_cache.first_addr_for_leaf = 0;

  /* Per-generation leaf table, initially all empty. */
  bm->gno_count = gc->gno_count;
  bm->gno_to_leaf = (leaf_t**)must_malloc( sizeof(leaf_t*) * bm->gno_count );
  {
    int g;
    for (g = 0; g < bm->gno_count; g++)
      bm->gno_to_leaf[g] = NULL;
  }
  bm->leaf_count = 0;

  annoyingmsg( "ebmp{gc,leaf_words=%d,entries_per_inode=%d,depth=%d,tree} max_leaves:%d",
               bm->leaf_words, bm->entries_per_inode, bm->depth,
               max_leaf_count );

  return bm;
}
/* stk_flush: flush the stack cache to the heap.
 *
 * Converts every frame between G_STKP (top) and G_STKBOT (bottom) from
 * stack representation to heap (vector) representation in place, links
 * the frames into a continuation chain, and attaches the chain to
 * globals[G_CONT].  This is the inverse of stk_restore_frame.
 *
 * Precondition: REG0 holds a procedure (asserted below).
 */
void stk_flush( word *globals )
{
  word *stktop, *stkbot, *first, *prev;
  word retaddr, codeaddr, codeptr, proc, size;
  unsigned framecount;

  assert2( tagof( globals[ G_REG0 ]) == PROC_TAG );

  stktop = (word*)globals[ G_STKP ];
  stkbot = (word*)globals[ G_STKBOT ];
  stack_state.words_flushed += (stkbot-stktop);
  first = prev = 0;  /* first/prev track the chain being built */
  framecount = 0;
  while (stktop < stkbot) {
    size = *(stktop+STK_CONTSIZE);
    retaddr = *(stktop+STK_RETADDR);

    /* convert header to vector header */
    assert2( size % 4 == 0 );        /* size must be words, a fixnum */
    assert2( (s_word)size >= 12 );   /* 3-word minimum, and nonnegative */
    *(stktop+HC_HEADER) = mkheader( size, VEC_HDR );

    /* convert return address: on the stack it is absolute; in the heap
     * frame it is stored relative to the procedure's code bytevector
     * (the '+4' presumably skips the bytevector header — mirrors
     * stk_restore_frame).
     */
    proc = *(stktop+STK_REG0);
    if (proc != 0) {
      assert2( tagof( proc ) == PROC_TAG );
      codeptr = *(ptrof( proc )+PROC_CODEPTR);
      if (tagof( codeptr ) == BVEC_TAG) {
        codeaddr = (word)ptrof( codeptr );
        *(stktop+HC_RETOFFSET) = retaddr-(codeaddr+4);
      }
      else {
        *(stktop+HC_RETOFFSET) = retaddr;
      }
    }
    else {
      *(stktop+HC_RETOFFSET) = retaddr;
    }

    /* chain things together */
    if (first == 0)
      first = stktop;
    else
      *(prev+HC_DYNLINK) = (word)tagptr( stktop, VEC_TAG );
    prev = stktop;

    framecount++;

    /* Advance to the next (older) frame; words are 4 bytes here. */
    size = roundup8( size+4 );
    stktop += size / 4;
#if 0
    annoyingmsg("Flush: %d", size );
#endif
  }
  /* Terminate the chain with the existing continuation, and publish
   * the new chain head (if any frames were flushed).
   */
  if (prev != 0)
    *(prev+HC_DYNLINK) = globals[ G_CONT ];
  if (first != 0)
    globals[ G_CONT ] = (word)tagptr( first, VEC_TAG );
  globals[ G_STKBOT ] = globals[ G_STKP ];
  stack_state.frames_flushed += framecount;
}
/* scheme_start: enter compiled Scheme code via the i386 dispatch loop.
 *
 * Allocates a fresh jmp_buf (saving any outer one, so recursive entry
 * via the FFI unwinds correctly), plants the dispatch-loop return
 * address in the bottom-most stack frame, then runs the outer
 * setjmp-based dispatch loop described in the comment below.  Returns
 * only on DISPATCH_EXIT.
 */
void scheme_start( word *globals )
{
  cont_t f = 0;
  word *stkp = (word*)globals[ G_STKP ];
  int x;
  jmp_buf *old_jump_buffer = dispatch_jump_buffer;

  if (already_running)
    annoyingmsg( "Recursive call to scheme_start (FFI?)" );
  already_running = 1;

  dispatch_jump_buffer = gclib_alloc_rts(sizeof(jmp_buf), 0);
  if (dispatch_jump_buffer == NULL)
    panic_abort("Couldn't allocate fresh jmp_buf");

#if 0
  /* Patch in bootstrap code if necessary */
  if (procedure_ref( globals[ G_REG0 ], IDX_PROC_CODE ) == FALSE_CONST)
    procedure_set( globals[ G_REG0 ], IDX_PROC_CODE, (word)twobit_start );
#endif

  /* Return address for bottom-most frame */
  stkp[ STK_RETADDR ] = (word)i386_dispatch_loop_return;
  stkp[ STK_REG0 ] = 0;

  /* The dispatch loop is a doubly-nested quasi-loop.

     The outer loop uses setjmp/longjmp for control and is entered
     but rarely; most of the time is spent in the inner loop.  The
     job of the outer loop is to provide the inner loop with the
     address of the first block to execute.

     The inner loop is implemented entirely in compiled code: we
     just jump to the entry point, and any return is done through a
     longjmp to the outer loop.
  */

  /* Outer loop */
  switch (x = setjmp( *dispatch_jump_buffer )) {
  case 0 :                       /* initial entry through setjmp */
  case DISPATCH_CALL_R0 :
    assert2( tagof( globals[ G_REG0 ]) == PROC_TAG );
    f = procedure_ref( globals[ G_REG0 ], IDX_PROC_CODE );
    f = (cont_t)(ptrof(f)+1);    /* skip over bytevector header */
    break;
  case DISPATCH_EXIT:
    /* Restore the outer jmp_buf (if any) so a recursive caller's
     * dispatch loop keeps working, then return to C.
     */
    already_running = 0;
    gclib_free(dispatch_jump_buffer, sizeof(jmp_buf));
    dispatch_jump_buffer = old_jump_buffer;
    return;
  case DISPATCH_RETURN_FROM_S2S_CALL :
    panic_exit( "DISPATCH_RETURN_FROM_S2S_CALL shouldn't happen." );
    break;
  case DISPATCH_STKUFLOW :
    f = refill_stack_cache( globals );
    globals[ G_STKP ] += 4+4*STK_RETADDR;
    /* The '4*' compensates for layouts.cfg oversight */
    break;
  case DISPATCH_SIGFPE :
    handle_sigfpe( globals );
    /* No break: reaching the default case is impossible only because
     * panic_exit does not return — NOTE(review): confirm panic_exit is
     * declared noreturn.
     */
    panic_exit( "handle_sigfpe() returned." );
  default :
    panic_exit( "Unexpected value %d from setjmp in scheme_start()", x );
  }

  /* Inner loop */
  i386_scheme_jump(globals,f); /* Never returns */
}