Example #1
0
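 // Copies a GC-marked, not-yet-forwarded object into the shared read-only or
 // read-write space, warns and exits if that space is too small, and installs
 // a forwarding pointer from the original object to the shared copy.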
 void do_object(oop obj) {
   if (obj->is_shared()) {
     return;
   }
   if (obj->is_gc_marked() && obj->forwardee() == NULL) {
     int s = obj->size();
     oop sh_obj = (oop)_space->allocate(s);
     if (sh_obj == NULL) {
       if (_read_only) {
         warning("\nThe permanent generation read only space is not large "
                 "enough to \npreload requested classes.  Use "
                 "-XX:SharedReadOnlySize= to increase \nthe initial "
                 "size of the read only space.\n");
       } else {
         warning("\nThe permanent generation read write space is not large "
                 "enough to \npreload requested classes.  Use "
                 "-XX:SharedReadWriteSize= to increase \nthe initial "
                 "size of the read write space.\n");
       }
       exit(2);
     }
     if (PrintSharedSpaces && Verbose && WizardMode) {
       tty->print_cr("\nMoveMarkedObjects: " PTR_FORMAT " -> " PTR_FORMAT " %s", obj, sh_obj,
                     (_read_only ? "ro" : "rw"));
     }
     Copy::aligned_disjoint_words((HeapWord*)obj, (HeapWord*)sh_obj, s);
     obj->forward_to(sh_obj);
     if (_read_only) {
       // Readonly objects: set hash value to self pointer and make gc_marked.
       sh_obj->forward_to(sh_obj);
     } else {
       sh_obj->init_mark();
     }
   }
 }
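// Records a failed promotion: preserves the mark word if necessary, forwards
// the object to itself, and pushes it on the promotion-failure scan stack,
// which is drained behind a guard that prevents recursion from
// copy_to_survivor_space().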
void DefNewGeneration::handle_promotion_failure(oop old) {
  log_debug(gc, promotion)("Promotion failure size = %d", old->size());

  _promotion_failed = true;
  _promotion_failed_info.register_copy_failure(old->size());
  preserve_mark_if_necessary(old, old->mark());
  // forward to self
  old->forward_to(old);

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}
Example #3
0
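// Variant of handle_promotion_failure() that reports through gclog_or_tty
// under PrintPromotionFailure and does not record per-failure statistics;
// otherwise the same self-forwarding and guarded stack drain as above.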
void DefNewGeneration::handle_promotion_failure(oop old) {
  if (PrintPromotionFailure && !_promotion_failed) {
    gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ",
                        old->size());
  }
  _promotion_failed = true;
  preserve_mark_if_necessary(old, old->mark());
  // forward to self
  old->forward_to(old);

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}
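// Copies old into to-space when it is young enough and JVMPI slow allocation
// is off; otherwise promotes it to the next generation, exiting the VM if the
// promotion fails.  A to-space copy is aged and recorded in the age table, and
// old is forwarded to its new location.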
oop DefNewGeneration::copy_to_survivor_space(oop old, oop* from) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;
  
  // Try allocating obj in to-space (unless too old or won't fit or JVMPI
  // enabled)
  if (old->age() < tenuring_threshold() &&
      !Universe::jvmpi_slow_allocation()) {
    obj = (oop) to()->allocate(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _next_gen->promote(old, s, from);
    if (obj == NULL) {
      // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
      // is incorrectly set. In any case, it's seriously wrong to be here!
      vm_exit_out_of_memory(s*wordSize, "promotion");
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    atomic::prefetch_write(obj, interval);

    // Copy obj
    Memory::copy_words_aligned((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age(); 
    age_table()->add(obj, s);
  }

  if (Universe::jvmpi_move_event_enabled()) {
    Universe::jvmpi_object_move(old, obj);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  return obj;
}
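// As above, but a failed promotion only exits the VM when
// HandlePromotionFailure is off; otherwise the failure is recorded and the
// original, self-forwarded object is returned.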
oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _next_gen->promote(old, s);
    if (obj == NULL) {
      if (!HandlePromotionFailure) {
        // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
        // is incorrectly set. In any case, it's seriously wrong to be here!
        vm_exit_out_of_memory(s*wordSize, "promotion");
      }

      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  return obj;
}
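// As above, except a failed promotion is always routed through
// handle_promotion_failure(); the HandlePromotionFailure check is absent.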
oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _next_gen->promote(old, s);
    if (obj == NULL) {
      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  return obj;
}