// Visit every map object stored in this space's oops area, feeding each one
// to the enumeration's filter. Objects are walked from the top of the area
// down toward the bottom; the walk stops early if the enumeration says so.
void space::enumerate_maps(enumeration* e) {
  oop* cursor = objs_top;
  while (cursor > objs_bottom && e->is_ok()) {
    // Back up to the start of the object that ends just before `cursor`.
    cursor = find_this_object(cursor - 1);
    oopsOop candidate = as_oopsOop(cursor);
    if (candidate->is_map())
      e->filter_map(mapOop(candidate));
  }
}
// Post-scavenge fixup of the new-generation map list.
// For each entry: follow forwarding pointers if the map's enclosing mapOop
// was copied, then classify the map as tenured, dead, or alive-and-young.
// Young survivors stay on the list (with updated addresses); dead maps are
// deleted; tenured and dead entries are unlinked from the list.
void newGeneration::adjust_maps() {
  int n= 0, nDead= 0, nSurv= 0; // gather stats of length of list
  MapList *mapl;
  // `prevp` always points at the link that points at `mapl`, so entries can
  // be spliced out of the singly-linked list in place.
  for ( MapList **prevp= &map_list; mapl= *prevp, mapl != NULL; n++) {
    slotsMapDeps *map= mapl->map;
    mapOop m= map->enclosing_mapOop();
    if (m->is_forwarded()) {
      // The mapOop was copied by the scavenger; chase the forwardee and
      // recompute the map address from the new location.
      slotsMapDeps* oldMap= map;
      m= mapOop(m->forwardee());
      map= (slotsMapDeps*)m->map_addr();
      map->forward_map(oldMap); // shift dependency links
    }
    assert_map(m, "new map list contains a non-map");
    // Classify: addresses at or above high_boundary were promoted to old
    // space; otherwise, still-scavengeable means it was never copied (dead).
    enum { was_tenured, died, is_alive_and_young } what_happened =
      (char*)m >= high_boundary
        ? was_tenured
        : ( Memory->should_scavenge(m) ? died : is_alive_and_young );
    switch (what_happened) {
     case is_alive_and_young:
      // survived to to-space: adjust list
      nSurv++;
      mapl->map= map;       // update list
      prevp= &(mapl->next); // goto next list elem
      break;
     case died:
      // died in eden- or from-space: delete map
      nDead++;
      map->delete_map();
      // and FALL THROUGH to remove link from list
     case was_tenured:
      // leave map alive, but remove from list entirely
      MapList *next= mapl->next;
      delete mapl;
      *prevp= next; // splice this entry out; prevp stays put
      break;
    }
  }
  if (PrintNewMapListScavengeStats)
    lprintf("%d new maps, %d died, %d survived, %d tenured\n",
            n, nDead, nSurv, n-nDead-nSurv);
}
// Try to find an existing compiled method (nmethod) that can be reused for
// this lookup. Returns a reusable nmethod if the code cache has one; otherwise
// returns a sentinel: compileAndReuse if compiling a reusable nmethod is
// worthwhile, cannotReuse if not.
nmethod* cacheProbingLookup::findMethodToReuse() {
  // Bail out immediately unless reuse is even a possibility.
  if (!mightBeAbleToReuseNMethod())
    return (nmethod*)cannotReuse;

  // Build the canonical key from the method holder of the lookup result.
  oop holder = result()->methodHolder_or_map(receiver);
  mapOop holderMapOop;
  if (holder->is_map())
    holderMapOop = mapOop(holder);
  else
    holderMapOop = holder->map()->enclosing_mapOop();

  MethodLookupKey key(NormalLookupType, MH_NOT_A_RESEND,
                      holderMapOop, selector(), 0);
  canonical_key = key; // copy info

  // Probe the code cache under the canonical key.
  nmethod* candidate = Memory->code->lookup(canonical_key, needDebug);
  if (candidate != NULL && candidate->reusable())
    return candidate;

  // Nothing reusable cached; decide whether compiling one would pay off.
  if (shouldCompileReusableNMethod(candidate))
    return (nmethod*)compileAndReuse;
  return (nmethod*)cannotReuse;
}
// Look for `selector` directly in the object `r`; if not found there,
// continue the search in r's parents. The map's lookup mark guards against
// revisiting an object (e.g. via cyclic parent chains).
void Lookup::findInObject(oop_t r) {
  MapObj* map = MapObj::from(mapOop(r));
  if (map->lookup_is_marked())
    return; // already visited during this lookup
  map->lookup_mark();
  SlotDesc* found = map->find_slot(selector);
  if (found == NULL) {
    // Not defined locally — search the parent slots.
    findInParentsOf(r, map);
  } else {
    result.add_slot(r, found);
  }
  map->lookup_unmark();
}
// Map equality: any two boolean maps count as equal; everything else
// defers to the maps' own equality test.
bool is_equal(oop m, oop n) {
  bool bothBoolean = is_boolean_map(m) && is_boolean_map(n);
  return bothBoolean || mapOop(m)->equal(mapOop(n));
}
// Entry point: find all slots matching `selector` starting at `rcvr`.
// Non-reentrant — a single static Lookup object is reused for every call,
// and a static flag traps accidental recursion with a fatal error.
// Returns a pointer into the shared static Lookup; valid until the next call.
Lookup::Result* Lookup::findSlotsIn( oop_t rcvr, oop_t selector, LookupType lt ) {
  static bool inProgress = false;
  if (inProgress)
    fatal("reentered");
  inProgress = true;

  static Lookup theLookup;
  theLookup.init(selector, lt);

  // A resend starts the search in the receiver's parents, skipping the
  // receiver itself; a normal send starts at the receiver.
  if (baseLookupType(lt) != ResendBaseLookupType)
    theLookup.findInObject(rcvr);
  else
    theLookup.findInParentsOf(rcvr, MapObj::from(mapOop(rcvr)));

  inProgress = false;
  return &theLookup.result;
}
// Slide all GC-marked objects in this space into `copySpace` (falling over to
// its successor spaces when full): oops parts are packed upward from the
// bottom through `d`, bytes parts downward from the top through `bd`.
// Dying maps (objects whose map is `unmarked_map_map`) are deleted as they
// are swept over. `copySpace`, `d`, and `bd` are in/out cursors shared with
// the caller across multiple spaces.
void space::compact(mapOop unmarked_map_map, space*& copySpace,
                    oop*& d, oop*& bd) {
  // compact oops and bytes (outwards in), place copies in copySpace
  // (and successors, if necessary)
  // Leave sentinel at end of oops part
  // (utilises extra word between objs and bytes part).
  // This causes the is_object_start() loop below to exit.
  set_objs_top_sentinel(badOop);
  if (copySpace == this) { d= objs_bottom; bd= bytes_top; }
  for (oop* p= objs_bottom; p < objs_top; ) {
    oopsOop obj = as_oopsOop(p);
    if (obj->is_gc_marked()) {
      // object survives GC
      // figure out size (the map pointer still carries the GC mark bit,
      // hence gc_unmark() before dereferencing)
      Map* nm = mapOop(obj->map()->enclosing_mapOop()->gc_unmark())->map_addr();
      fint size = nm->object_size(obj);
      byteVectorOop bv= NULL;
      int32 bsize= 0;
      if (nm->is_byteVector()) {
        bv= byteVectorOop(obj);
        bsize= bv->lengthWords();
      }
      // Spill into the next old space if this object won't fit.
      if (copySpace != this && !copySpace->would_fit(size, bsize)) {
        copySpace= ((oldSpace*)copySpace)->next_space;
        d= copySpace->objs_bottom;
        bd= copySpace->bytes_top;
      }
      // check for special map processing
      if (obj->map() == Memory->map_map) {
        // adjust dependencies first
        as_mapOop(p)->map_addr()->shift_map(as_mapOop(d)->map_addr());
      }
      // do compaction
      if (bv) {
        // compact bytes part up
        oop* bp = (oop*) bv->bytes();
        assert(copySpace != this || bp + bsize <= bd,
               "bytes parts aren't in order");
        copy_words_down((int32*)bp + bsize, (int32*)bd, bsize);
        bd -= bsize;
        bv->set_bytes((char*) bd); // repoint the vector at its moved bytes
      }
      // compact oops part down
      copy_oops_up(p, d, size);
      as_oopsOop(d)->gc_moved();
      d += size;
      p += size;
      if (copySpace != this) {
        // keep the destination space's own bounds current
        copySpace->objs_top= d;
        copySpace->bytes_bottom= bd;
      }
    } else {
      // object is dying
      // check for special map processing
      if (((memOopClass*)p)->_map == unmarked_map_map) {
        // delete the dying map
        as_mapOop(p)->map_addr()->delete_map();
      }
      // skip to next object
      // (can't use object's map to compute object size,
      //  since it might be destroyed by now)
      for (p += 2; // skip mark and map
           !is_object_start(*p); p++)
        ;
      assert(p <= objs_top, "compacter ran off end");
    }
  }
  assert(d < bd, "didn't compact anything");
  if (copySpace == this) { objs_top= d; bytes_bottom= bd; }
}
// Construct method info for the abstract interpreter from a method's
// bytecodes and literal vector. The cached map oop starts out poisoned
// with badOop until something sets it.
abstract_interpreter_method_info::abstract_interpreter_method_info(
    byteVectorOop codes, objVectorOop literals)
  : _map_oop(mapOop(badOop)) {
  init(codes, literals);
}