// Clone `obj` with one slot removed, building a new map to match.
// Returns the new object, or failedAllocationOop (as a slotsOop) if any
// allocation fails along the way. `slot` must point into this map's own
// slot-descriptor array.
slotsOop slotsMap::copy_remove_one_slot(slotsOop obj, slotDesc *slot, bool mustAllocate) {
  assert_slots(obj, "object isn't a slotsOop");
  assert(!obj->is_string(), "cannot clone strings!");
  assert(slot >= slots() && slot < slotsMap::slot(length_slots()), "slotDesc not part of map");
  // Copy this map with the slot descriptor removed.
  slotsMap* new_map= (slotsMap*) remove(slot, 1, mustAllocate);
  if (new_map == NULL) return slotsOop(failedAllocationOop);
  new_map->slots_length = new_map->slots_length->decrement();
  new_map->init_dependents();
  mapOop new_moop = new_map->enclosing_mapOop();
  new_moop->init_mark();
  slotsOop new_obj;
  // How the object itself changes depends on the kind of slot removed.
  switch (slot->type->slot_type()) {
   case obj_slot_type:
    // Data slot: the object loses the word holding the slot's contents;
    // slot->data is the (tagged) word offset of that contents word.
    assert_smi(slot->data, "data slot contents isn't an offset");
    new_obj= obj->is_byteVector()
      ? (slotsOop) byteVectorOop(obj)->remove(object_size(obj), smiOop(slot->data)->value(), 1, mustAllocate, true)
      : (slotsOop) slotsOop(obj)->remove(object_size(obj), smiOop(slot->data)->value(), 1, mustAllocate, true);
    if (oop(new_obj) == failedAllocationOop) return slotsOop(failedAllocationOop);
    // check-stores done by remove already
    // Slots stored past the removed word shift down by one.
    new_map->shift_obj_slots(smiOop(slot->data), -1);
    new_map->object_length = new_map->object_length->decrement();
    break;
   case arg_slot_type: {
    // fix up any arg slots after this one: each argument slot records its
    // positional index in slot->data, so later args move down by one.
    assert_smi(slot->data, "bad arg index");
    fint argIndex= smiOop(slot->data)->value();
    FOR_EACH_SLOTDESC(new_map, s) {
      if (s->is_arg_slot()) {
        assert_smi(s->data, "bad arg index");
        fint a= smiOop(s->data)->value();
        if (a > argIndex) s->data= as_smiOop(a - 1);
      }
    }
   }
   // fall through: arg and map slots store nothing in the object body,
   // so a plain clone of the object suffices.
   case map_slot_type:
    new_obj= slotsOop(obj->clone(mustAllocate));
    if (oop(new_obj) == failedAllocationOop) return slotsOop(failedAllocationOop);
    break;
   default:
    ShouldNotReachHere(); // unexpected slot type;
  }
  new_obj->set_canonical_map(new_map);
  return new_obj;
}
// Clone `obj` and change an existing slot's type, contents, and annotation.
// Returns the new object, or failedAllocationOop on allocation failure.
// If only the in-object data word changes (type/annotation/map-data are
// unchanged), the existing map is reused; otherwise a fresh map is built.
oop slotsMap::change_slot(oop obj, slotDesc* slot, slotType type, oop contents, oop anno, bool mustAllocate) {
  assert(slot != NULL, "cannot change the contents of a non-existent slot");
  assert(!obj->is_string(), "cannot clone strings!");
  assert_slots(obj, "object isn't a slotsOop");
  slotsOop new_obj= slotsOop(obj->clone(mustAllocate));
  if (oop(new_obj) == failedAllocationOop) return failedAllocationOop;
  switch (slot->type->slot_type()) {
   case obj_slot_type:
    // Assignable slot: new contents go into the object body at the word
    // offset recorded in slot->data.
    assert(NakedMethods || !contents->has_code() || slot->type->is_vm_slot(), "adding an assignable slot with code");
    assert_smi(slot->data, "data slot contents isn't an offset");
    new_obj->at_put(smiOop(slot->data)->value(), contents);
    break;
   case map_slot_type:
    // Constant slot: contents live in the map, handled below.
    break;
   case arg_slot_type:
    assert_smi(contents, "argument index isn't a smiOop");
    break;
   default:
    ShouldNotReachHere(); // unexpected slot type
  }
  if ( slot->data == contents && slot->type == type && slot->get_annotation() == anno) {
    // no change (to the map, at least)!
    return new_obj;
  }
  // create a new map for this object
  slotsMap* new_map= copy_for_changing(mustAllocate);
  if (new_map == NULL) return failedAllocationOop;
  // Re-point `slot` at the corresponding descriptor in the new map.
  slot = slot->shift(this, new_map);
  slot->type = type;
  slot->set_annotation(anno);
  if (!slot->is_obj_slot()) {
    // Map-held data (constant or arg index); store barrier required.
    Memory->store(&slot->data, contents);
  }
  new_obj->set_canonical_map(new_map);
  return new_obj;
}
// an optimized version inline oop blockOopClass::really_clone_block(smiOop fp) { assert_block(this, "not a block"); assert_smi(fp, "not a smallInt or pointer"); NumberOfBlockClones++; const int32 size = sizeof(blockOopClass)/oopSize; // would be cleaner (but slightly slower in the fast case) to do // b= Memory->alloc_objs(size); // if (Memory->new_gen->eden_space->contains(b)) ... blockOopClass* b= (blockOopClass*) Memory->new_gen->eden_space->alloc_objs_local(size); if (b) { # if GENERATE_DEBUGGING_AIDS if (CheckAssertions && b == (blockOopClass*)catchThisOne) { warning1("blockOopClass::really_clone_block caught 0x%lx", b); } # endif // allocated in eden; don't do check stores or scavenging b->_map = addr()->_map; b->setHomeFr(fp); blockOop b1 = as_blockOop(b); b1->init_mark(); return b1; } else { // overflowed eden; do check stores when done b = (blockOopClass*) Memory->alloc_objs(size); Memory->store((oop*) &b->_map, addr()->_map); b->setHomeFr(fp); blockOop b1 = as_blockOop(b); b1->init_mark(); return b1; } }
// Clone this block together with its map, recording descOffset (a tagged
// smallInt) in the cloned map's descriptor field.
blockOop blockOopClass::clone_and_set_desc(smiOop descOffset) {
  assert_smi(descOffset, "must be an int");
  // First clone the map with the new descriptor offset, then copy the
  // block itself and attach the fresh map to the copy.
  blockMap* clonedMap = ((blockMap*) map())->clone_and_set_desc(descOffset);
  blockOop clonedBlock = copy();
  clonedBlock->set_map(clonedMap);
  return clonedBlock;
}
// Clone `obj` with an argument slot named `name` added (or updated).
// `contents` is the tagged positional index the new arg slot should carry.
// Returns the new object, failedAllocationOop, or a VM error string if the
// name is not a unary selector.
oop slotsMap::copy_add_argument_slot(slotsOop obj, stringOop name, slotType type, oop contents, oop anno, bool mustAllocate) {
  assert_smi(contents, "arg data must be position");
  // Argument slot names must be unary selectors.
  if (!name->is_unary()) return ErrorCodes::vmString_prim_error(ARGUMENTCOUNTERROR);
  slotDesc* old = find_slot(name);
  slotsOop result;
  if (old == NULL)
    // No clash: add to the original object below.
    result= obj;
  else if (old->is_arg_slot()) {
    // No need to remove and reinsert because order is the same.
    // Only the annotation might be really different.
    // The index will be off by one (assumes that added slot is new)
    assert(smiOop(contents)->value() == smiOop(old->data)->value() + 1, "arg index wrong");
    return change_slot(obj, old, type, old->data, anno, mustAllocate);
  } else {
    // Name exists as a non-arg slot: remove it first, then add the arg slot.
    result= (slotsOop)copy_remove_slot(obj, name, mustAllocate);
    if (oop(result) == failedAllocationOop || result->is_mark()) return result;
    assert(result->is_slots(), "just checking");
  }
  // New arg slots are always appended at the end of the argument list.
  assert(smiOop(contents)->value() == arg_count(), "arg index wrong");
  return ((slotsMap*)result->map())->copy_add_new_slot(result, name, type, contents, anno, mustAllocate);
}
// Emit the machine code and inline send descriptor for this send node.
// The emission order (call, nop, mask, NLR code, zeroed nmln words,
// selector/argcount, lookup type, optional delegatee) is the layout the
// runtime's send machinery expects — do not reorder.
void SendNode::gen() {
  BasicNode::gen();
  offset = theAssembler->offset();
  assert(bci() != IllegalBCI, "should have legal bci");
  genPcDesc();
  genBreakpointBeforeCall();
  theAssembler->CallB(Memory->code->trapdoors->SendMessage_stub_td());
  theAssembler->Nop();
  theAssembler->Data(mask());      // mask of used registers
  nlrCode();                       // non-local return code
  theAssembler->Zero(); // nmlns
  theAssembler->Zero();
  if (sel != badOop) {
    if (isPerformLookupType(l)) {
      // Perform-style sends carry a dynamic selector; only the arg count
      // is known statically.
      assert_smi(sel, "should be an integer argcount");
      theAssembler->Data(smiOop(sel)->value()); // really arg count
    } else {
      assert_string(sel, "should be a string constant");
      theAssembler->Data(sel); // constant selector
    }
  }
  // Record that the compiled method has at least one inlinable send.
  if ((l & UninlinableSendMask) == 0) theSIC->noInlinableSends = false;
  theAssembler->Data(l);           // lookup type word
  verifySendInfo();
  if (del) {
    assert(needsDelegatee(l), "shouldn't have a delegatee");
    theAssembler->Data(del);
  }
}
// Emit an inline send descriptor (register mask, NLR entry, nmln words,
// selector/argcount, lookup type, optional delegatee) and return the label
// of the non-local-return code. A jump around the descriptor keeps the
// inline data out of the execution path.
Label* CodeGen::SendDesc(RegisterState* s, LookupType lookupType, oop selector, oop delegatee) {
  a.Comment("begin SendDesc");
  Label past_send_desc(a.printing);
  a.jmp(&past_send_desc);          // skip over the inline data
  s->genMask(); // mask of used regs
  Label* l = new Label(a.printing);
  a.jmp(l); // non-local return code
  // The descriptor words must start on an aligned (untagged) offset.
  assert((a.offset() & Tag_Mask) == 0, "must be aligned");
  a.Zero(); // nmlns
  a.Zero();
  if (selector != badOop) {
    if (isPerformLookupType(lookupType)) {
      // Perform-style send: selector is dynamic, only the arg count is known.
      assert_smi(selector, "should be an integer argcount");
      a.Data(smiOop(selector)->value(), true); // really arg count
    } else {
      assert_string(selector, "should be a string constant");
      a.Data(selector, true); // constant selector
    }
  }
# ifdef SIC_COMPILER
  if (theCompiler->containsLoop) {
    // need counters for the sends to know how often the loop executes
    a.Data(withCountBits(lookupType, Counting), true);
  } else {
    a.Data(lookupType, true);
  }
# else
  a.Data(lookupType, true);
# endif
# if GENERATE_DEBUGGING_AIDS
  // Sanity-check selector/delegatee staticness for each lookup type.
  if (CheckAssertions)
    switch (lookupType) {
     case DirectedResendLookupType:
      assert(lookupType & DelegateeStaticBit, "should have static delegatee");
      assert_string(delegatee, "should be a string");
      // fall through
     case ImplicitSelfLookupType:
     case ResendLookupType:
     case StaticNormalLookupType:
     case NormalLookupType:
      assert(!isPerformLookupType(lookupType), "should have a static selector");
      assert_string(selector, "should be a string");
      break;
     default:
      break;
    }
# endif
  if (delegatee != badOop) {
    assert(needsDelegatee(lookupType), "shouldn't have a delegatee");
    a.Data(delegatee, true);
  }
  past_send_desc.define();
  a.Comment("end SendDesc");
  return l;
}
// Install `newScope` as this block's home frame. Blocks whose home frame
// reference is invalid (prototype blocks or non-LIFO blocks) must not have
// a scope set at all.
void blockOopClass::setScope(frame* newScope) {
  blockOopClass* self = addr();
  frame* home = self->homeFr();
  if (NLRSupport::is_bad_home_reference((char*) home)) {
    // prototype block or non-LIFO block: setting a scope pointer is an error
    if (newScope != NULL) ShouldNotReachHere();
    return;
  }
  // A frame pointer is word-aligned, so it doubles as a tagged smallInt.
  assert_smi(newScope, "should be a word-aligned pointer");
  self->setHomeFr(smiOop(newScope));
}
// Emit an inline send descriptor and return the label of the
// non-local-return code. Variant of SendDesc that reaches the NLR code via
// a forward branch; the word layout (mask, NLR branch, nop, nmlns,
// selector/argcount, lookup type, optional delegatee) must not be reordered.
Label* CodeGen::SendDesc(RegisterState* s, LookupType lookupType, oop selector, oop delegatee) {
  s->genMask(); // mask of used regs
  Label* l = a.BraForward(true); // non-local return code
  a.Nop();
  a.Zero(); // nmlns
  a.Zero();
  if (selector != badOop) {
    if (isPerformLookupType(lookupType)) {
      // Perform-style send: selector is dynamic, only the arg count is known.
      assert_smi(selector, "should be an integer argcount");
      a.Data(smiOop(selector)->value()); // really arg count
    } else {
      assert_string(selector, "should be a string constant");
      a.Data(selector); // constant selector
    }
  }
  if (theCompiler->containsLoop) {
    // need counters for the sends to know how often the loop executes
    a.Data(withCountBits(lookupType, Counting));
  } else {
    a.Data(lookupType);
  }
# if GENERATE_DEBUGGING_AIDS
  // Sanity-check selector/delegatee staticness for each lookup type.
  if (CheckAssertions)
    switch (lookupType) {
     case DirectedResendLookupType:
      assert(lookupType & DelegateeStaticBit, "should have static delegatee");
      assert_string(delegatee, "should be a string");
      // fall through
     case ImplicitSelfLookupType:
     case ResendLookupType:
     case StaticNormalLookupType:
     case NormalLookupType:
      assert(!isPerformLookupType(lookupType), "should have a static selector");
      assert_string(selector, "should be a string");
      break;
     default:
      break;
    }
# endif
  if (delegatee != badOop) {
    assert(needsDelegatee(lookupType), "shouldn't have a delegatee");
    a.Data(delegatee);
  }
  return l;
}
// Set up the top-level scope: fix the receiver register and pre-allocate
// a permanent location for each incoming argument slot of the method.
// NOTE(review): the visible text appears to end one `}` short of closing
// the function — presumably FOR_EACH_SLOTDESC_N supplies the balance or
// the definition continues past this chunk; tokens kept exactly as-is.
void FSelfScope::initialize() {
  assert( isTop(), "can't inline yet");
  // preallocate receiver, incoming args, locals
  self = receiver = IReceiverReg;
  allocs->allocatePermanent(receiver);
  {
    // Allocate space for arguments and count argument slots.
    nargs = 0;
    FOR_EACH_SLOTDESC_N(method()->map(), s, i) {
      // One entry per slot; non-arg slots stay UnAllocated.
      args->append(UnAllocated);
      if (s->is_arg_slot()) {
        // Arg slots record their positional index in s->data.
        oop ind= s->data;
        assert_smi(ind, "bad index");
        fint argIndex= smiOop(ind)->value();
        allocs->allocatePermanent(IArgLocation(argIndex));
        args->nthPut(i, IArgLocation(argIndex));
        nargs++;
      }
    }
  }
// Clone `obj` with a brand-new slot named `name` added, building a new map
// with the corresponding slot descriptor inserted. The name must not
// already exist in this map. Returns the new object, or failedAllocationOop
// (as a slotsOop) if any allocation fails.
slotsOop slotsMap::copy_add_new_slot(slotsOop obj, stringOop name, slotType slot_type, oop contents, oop anno, bool mustAllocate) {
  assert_slots(obj, "object isn't a slotsOop");
  assert(!obj->is_string(), "cannot clone strings!");
  bool found;
  // Find the sorted insertion point for the new slot descriptor.
  fint newIndex= find_slot_index_for(name, found);
  assert(!found, "I only add new slots");
  slotsMap* new_map= (slotsMap*) insert(newIndex, mustAllocate);
  if (new_map == NULL) return slotsOop(failedAllocationOop);
  slotDesc* s= new_map->slot(newIndex);
  new_map->slots_length= new_map->slots_length->increment();
  mapOop new_moop= new_map->enclosing_mapOop();
  new_moop->init_mark();
  new_map->init_dependents();
  slotsOop new_obj;
  switch (slot_type->slot_type()) {
   case obj_slot_type: {
    // Assignable slot: the object body grows by one word to hold the
    // slot's contents.
    assert(NakedMethods || !contents->has_code() || slot_type->is_vm_slot(), "adding an assignable slot with code");
    // find which offset this slot should be at: one past the nearest
    // preceding obj slot's data word, or the empty-object size if none.
    fint offset= empty_object_size();
    for (fint i= newIndex - 1; i >= 0; --i)
      if (slot(i)->is_obj_slot()) {
        offset= smiOop(slot(i)->data)->value() + 1;
        break;
      }
    new_obj= obj->is_byteVector()
      ? (slotsOop) byteVectorOop(obj) -> insert(object_size(obj), offset, 1, mustAllocate, true)
      : (slotsOop) slotsOop(obj) -> insert(object_size(obj), offset, 1, mustAllocate, true);
    if (oop(new_obj) == failedAllocationOop) return slotsOop(failedAllocationOop);
    // Slots stored at or past the inserted word shift up by one.
    new_map->shift_obj_slots(as_smiOop(offset), 1);
    new_map->object_length = new_map->object_length->increment();
    new_obj->at_put(offset, contents, false);
    new_obj->fix_generation(new_map->object_size(new_obj));
    contents= as_smiOop(offset); // tagged index of slot data
    break;
   }
   case map_slot_type:
    // Constant slot: contents live in the map; just clone the object.
    new_obj= slotsOop(obj->clone(mustAllocate));
    break;
   case arg_slot_type:
    // Argument slot: contents is the arg's positional index; object body
    // is unchanged.
    assert_smi(contents, "argument index isn't a smiOop");
    new_obj= slotsOop(obj->clone(mustAllocate));
    break;
   default:
    ShouldNotReachHere(); // unexpected slot type
  }
  if (oop(new_obj) == failedAllocationOop) return slotsOop(failedAllocationOop);
  // Fill in the freshly inserted descriptor and finish the new map/object.
  s->init(name, slot_type, contents, anno, false);
  new_moop->fix_generation(new_moop->size());
  new_obj->set_canonical_map(new_map);
  return new_obj;
}