Example 1
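// Build a copy of this map (and of obj) with the given slot removed.
// For a data (obj_) slot the corresponding word is also removed from the
// object body and later object slots are shifted down; for an argument
// slot the remaining argument indices are renumbered.  Returns
// failedAllocationOop if an allocation fails.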
slotsOop slotsMap::copy_remove_one_slot(slotsOop obj, slotDesc *slot,
                                        bool mustAllocate) {
  assert_slots(obj, "object isn't a slotsOop");
  assert(!obj->is_string(), "cannot clone strings!");
  assert(slot >= slots() && slot < slotsMap::slot(length_slots()),
         "slotDesc not part of map");

  slotsMap* new_map= (slotsMap*) remove(slot, 1, mustAllocate);
  if (new_map == NULL) return slotsOop(failedAllocationOop);
  new_map->slots_length = new_map->slots_length->decrement();
  new_map->init_dependents();
  mapOop new_moop = new_map->enclosing_mapOop();
  new_moop->init_mark();

  slotsOop new_obj;
  switch (slot->type->slot_type()) {
   case obj_slot_type:
    assert_smi(slot->data, "data slot contents isn't an offset");
    new_obj= obj->is_byteVector()
      ? (slotsOop) byteVectorOop(obj)->remove(object_size(obj),
                                              smiOop(slot->data)->value(), 
                                              1, mustAllocate, true)
      : (slotsOop) slotsOop(obj)->remove(object_size(obj),
                                         smiOop(slot->data)->value(), 
                                         1, mustAllocate, true);
    if (oop(new_obj) == failedAllocationOop)
      return slotsOop(failedAllocationOop);
    // check-stores done by remove already
    new_map->shift_obj_slots(smiOop(slot->data), -1);
    new_map->object_length = new_map->object_length->decrement();
    break;
   case arg_slot_type: {
    // fix up any arg slots after this one
    assert_smi(slot->data, "bad arg index");
    fint argIndex= smiOop(slot->data)->value();
    FOR_EACH_SLOTDESC(new_map, s) {
      if (s->is_arg_slot()) {
        assert_smi(s->data, "bad arg index");
        fint a= smiOop(s->data)->value();
        if (a > argIndex)
          s->data= as_smiOop(a - 1);
      }
    }
   }
   // fall through     
   case map_slot_type:
    new_obj= slotsOop(obj->clone(mustAllocate));
    if (oop(new_obj) == failedAllocationOop)
      return slotsOop(failedAllocationOop);
    break;
   default:
    ShouldNotReachHere(); // unexpected slot type
  }

  new_obj->set_canonical_map(new_map);
  
  return new_obj;
}
Example 2
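// If the bytes part of this space has moved, walk the objects part and
// let each byteVector update its pointer into the bytes part.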
void space::relocate_bytes() {
  if (bytes_bottom != old_bytes_bottom) {
    for (oop* p = objs_bottom; p < objs_top; ) {
      oopsOop m = as_oopsOop(p);
      if (m->is_byteVector()) {
        byteVectorOop(m)->relocate_bytes(this);
      }
      p += m->size();
    }
  }
}
Example 3
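// Build a copy of this map (and of obj) with a new slot <name, slot_type,
// contents, anno> added.  For a data (obj_) slot a word is inserted into
// the object body at the appropriate offset and the slot's contents field
// becomes the tagged offset; map and argument slots only require cloning
// obj.  Returns failedAllocationOop if an allocation fails.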
slotsOop slotsMap::copy_add_new_slot(slotsOop  obj, 
                                     stringOop name,
                                     slotType  slot_type,
                                     oop       contents,
                                     oop       anno,
                                     bool      mustAllocate) {
  assert_slots(obj, "object isn't a slotsOop");
  assert(!obj->is_string(), "cannot clone strings!");

  bool found;
  fint newIndex= find_slot_index_for(name, found);
  assert(!found, "I only add new slots");
  slotsMap* new_map= (slotsMap*) insert(newIndex, mustAllocate);
  if (new_map == NULL) return slotsOop(failedAllocationOop);

  slotDesc* s= new_map->slot(newIndex);
  new_map->slots_length= new_map->slots_length->increment();
  mapOop new_moop= new_map->enclosing_mapOop();
  new_moop->init_mark();
  new_map->init_dependents();

  slotsOop new_obj;
  switch (slot_type->slot_type()) {
   case obj_slot_type: {
    assert(NakedMethods || !contents->has_code() || slot_type->is_vm_slot(),
           "adding an assignable slot with code");
    // find which offset this slot should be at
    fint offset= empty_object_size();
    for (fint i= newIndex - 1; i >= 0; --i)
      if (slot(i)->is_obj_slot()) {
        offset= smiOop(slot(i)->data)->value() + 1;
        break;
      }
    new_obj= obj->is_byteVector()
              ? (slotsOop) byteVectorOop(obj) ->
                  insert(object_size(obj), offset, 1, mustAllocate, true)
              : (slotsOop) slotsOop(obj) ->
                  insert(object_size(obj), offset, 1, mustAllocate, true);
    if (oop(new_obj) == failedAllocationOop)
      return slotsOop(failedAllocationOop);
    new_map->shift_obj_slots(as_smiOop(offset), 1);
    new_map->object_length = new_map->object_length->increment();
    new_obj->at_put(offset, contents, false);
    new_obj->fix_generation(new_map->object_size(new_obj));
    contents= as_smiOop(offset);   // tagged index of slot data
    break; }
   case map_slot_type:
    new_obj= slotsOop(obj->clone(mustAllocate));
    break;
   case arg_slot_type:
    assert_smi(contents, "argument index isn't a smiOop");
    new_obj= slotsOop(obj->clone(mustAllocate));
    break;
   default:
    ShouldNotReachHere(); // unexpected slot type
  }
  if (oop(new_obj) == failedAllocationOop)
    return slotsOop(failedAllocationOop);
  s->init(name, slot_type, contents, anno, false);
  new_moop->fix_generation(new_moop->size());
  new_obj->set_canonical_map(new_map);
  
  return new_obj;
}
Example 4
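// unmarked_map_map: the (unmarked) map of maps, used to recognize dying
//                   objects that are themselves maps and must be deleted.
// copySpace, d, bd: destination space and its next free positions for the
//                   oops part (d, growing up) and the bytes part (bd,
//                   growing down); all three are updated as we go.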
void space::compact(mapOop unmarked_map_map,
                    space*& copySpace,
                    oop*& d,
                    oop*& bd) {
  // compact the oops part down and the bytes part up (outwards from the
  // middle), placing copies in copySpace (and its successors, if necessary)

  // Leave sentinel at end of oops part
  // (utilises extra word between objs and bytes part).
  // This causes the is_object_start() loop below to exit.
  set_objs_top_sentinel(badOop);

  if (copySpace == this) {
    d=  objs_bottom;
    bd= bytes_top;
  }
  
  for (oop* p= objs_bottom; p < objs_top; ) {
    oopsOop obj = as_oopsOop(p);

    if (obj->is_gc_marked()) {
      // object survives GC
      // figure out size
      Map* nm = mapOop(obj->map()->enclosing_mapOop()->gc_unmark())->map_addr();
      fint size = nm->object_size(obj);
      byteVectorOop bv= NULL;
      int32 bsize= 0;
      if (nm->is_byteVector()) {
        bv= byteVectorOop(obj);
        bsize= bv->lengthWords();
      }

      if (copySpace != this && !copySpace->would_fit(size, bsize)) {
        copySpace= ((oldSpace*)copySpace)->next_space;
        d=  copySpace->objs_bottom;
        bd= copySpace->bytes_top;
      }

      // check for special map processing
      if (obj->map() == Memory->map_map) {
        // adjust dependencies first
        as_mapOop(p)->map_addr()->shift_map(as_mapOop(d)->map_addr());
      }

      // do compaction
      if (bv) {
        // compact bytes part up
        oop* bp = (oop*) bv->bytes();
        assert(copySpace != this  ||  bp + bsize <= bd,
               "bytes parts aren't in order");
        copy_words_down((int32*)bp + bsize, (int32*)bd, bsize);
        bd -= bsize;
        bv->set_bytes((char*) bd);
      }
      // compact oops part down
      copy_oops_up(p, d, size);
      as_oopsOop(d)->gc_moved();
      d += size;
      p += size;
      if (copySpace != this) {
        copySpace->objs_top= d;
        copySpace->bytes_bottom= bd;
      }
    } else {
      // object is dying

      // check for special map processing
      if (((memOopClass*)p)->_map == unmarked_map_map) {
        // delete the dying map
        as_mapOop(p)->map_addr()->delete_map();
      }

      // skip to next object
      // (can't use object's map to compute object size,
      //  since it might be destroyed by now)
      for (p += 2;      // skip mark and map
           !is_object_start(*p);
           p++) ;
      assert(p <= objs_top, "compacter ran off end");
    }
  }
  assert(d < bd, "compacted oops part ran into compacted bytes part");
  if (copySpace == this) {
    objs_top= d;
    bytes_bottom= bd;
  }
}