Example #1
void do_object(oop obj) {
  if (obj->is_shared_readwrite()) {
    if (obj->mark()->is_marked()) {
      obj->init_mark();         // Don't revisit this object.
      obj->adjust_pointers();   // Adjust this object's references.
    }
  }
}
inline void MarkSweep::mark_object(oop obj) {
  // Some marks may contain information we need to preserve, so we store them
  // away and overwrite the mark. We'll restore them at the end of markSweep.
  markOop mark = obj->mark();
  obj->set_mark(markOopDesc::prototype()->set_marked());

  if (mark->must_be_preserved(obj)) {
    preserve_mark(obj, mark);
  }
}
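
The comment in mark_object() is terse about what actually needs preserving: the mark word normally carries the identity hash, GC age, and locking state, and the marking phase overwrites it with the "marked" prototype, so any header that is not the plain prototype has to be stashed and written back after compaction. Below is a minimal standalone sketch of that preserve-and-restore pattern; FakeObject, PreservedMarks, and the != PROTOTYPE test are illustrative stand-ins, not the HotSpot types or the real must_be_preserved() logic.

#include <cstdint>
#include <utility>
#include <vector>

// Illustrative stand-ins, not HotSpot types.
struct FakeObject {
  uintptr_t mark;                          // plays the role of the header's mark word
};

constexpr uintptr_t PROTOTYPE = 0x1;       // "neutral" header: no hash, unlocked, age 0
constexpr uintptr_t MARKED    = 0x3;       // value the collector writes while marking

class PreservedMarks {
 public:
  // Overwrite the mark with the "marked" pattern, saving the old value first
  // if it carries information (hash, lock state, age) we cannot recompute.
  void mark_object(FakeObject* obj) {
    uintptr_t old_mark = obj->mark;
    obj->mark = MARKED;
    if (old_mark != PROTOTYPE) {           // crude stand-in for must_be_preserved()
      _saved.emplace_back(obj, old_mark);
    }
  }

  // Called once compaction has moved everything: put the interesting marks back.
  void restore_all() {
    for (auto& entry : _saved) {
      entry.first->mark = entry.second;
    }
    _saved.clear();
  }

 private:
  std::vector<std::pair<FakeObject*, uintptr_t>> _saved;
};

In the real collector the adjust-pointers pass re-initializes every visited header to the prototype (Example #1's obj->init_mark()), so only the stashed, non-trivial marks need explicit restoration at the end of the cycle.
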
Example #3
static HeuristicsResult update_heuristics(oop o, bool allow_rebias) {
  markOop mark = o->mark();
  if (!mark->has_bias_pattern()) {
    return HR_NOT_BIASED;
  }

  // Heuristics to attempt to throttle the number of revocations.
  // Stages:
  // 1. Revoke the biases of all objects in the heap of this type,
  //    but allow rebiasing of those objects if unlocked.
  // 2. Revoke the biases of all objects in the heap of this type
  //    and don't allow rebiasing of these objects. Disable
  //    allocation of objects of that type with the bias bit set.
  Klass* k = o->klass();
  jlong cur_time = os::javaTimeMillis();
  jlong last_bulk_revocation_time = k->last_biased_lock_bulk_revocation_time();
  int revocation_count = k->biased_lock_revocation_count();
  if ((revocation_count >= BiasedLockingBulkRebiasThreshold) &&
      (revocation_count <  BiasedLockingBulkRevokeThreshold) &&
      (last_bulk_revocation_time != 0) &&
      (cur_time - last_bulk_revocation_time >= BiasedLockingDecayTime)) {
    // This is the first revocation we've seen in a while of an
    // object of this type since the last time we performed a bulk
    // rebiasing operation. The application is allocating objects in
    // bulk which are biased toward a thread and then handing them
    // off to another thread. We can cope with this allocation
    // pattern via the bulk rebiasing mechanism so we reset the
    // klass's revocation count rather than allow it to increase
    // monotonically. If we see the need to perform another bulk
    // rebias operation later, we will, and if subsequently we see
    // many more revocation operations in a short period of time we
    // will completely disable biasing for this type.
    k->set_biased_lock_revocation_count(0);
    revocation_count = 0;
  }

  // Make revocation count saturate just beyond BiasedLockingBulkRevokeThreshold
  if (revocation_count <= BiasedLockingBulkRevokeThreshold) {
    revocation_count = k->atomic_incr_biased_lock_revocation_count();
  }

  if (revocation_count == BiasedLockingBulkRevokeThreshold) {
    return HR_BULK_REVOKE;
  }

  if (revocation_count == BiasedLockingBulkRebiasThreshold) {
    return HR_BULK_REBIAS;
  }

  return HR_SINGLE_REVOKE;
}
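
The two thresholds and the decay window are easier to follow in isolation. The sketch below reproduces just the counting logic of update_heuristics() with invented names and constants (the real values come from the -XX:BiasedLocking* flags); it is a model of the decision shape, not the HotSpot implementation.

#include <cstdint>

enum HeuristicsResult { HR_NOT_BIASED, HR_SINGLE_REVOKE, HR_BULK_REBIAS, HR_BULK_REVOKE };

// Assumed stand-ins for the flags referenced above.
constexpr int     kBulkRebiasThreshold = 20;
constexpr int     kBulkRevokeThreshold = 40;
constexpr int64_t kDecayTimeMillis     = 25000;

// Per-type bookkeeping, mirroring the fields read off the Klass above.
struct PerTypeState {
  int     revocation_count        = 0;
  int64_t last_bulk_revocation_ms = 0;
};

// Individual revocations are counted per type. The count decays back to zero
// if revocations are rare, escalates to a bulk rebias at the first threshold,
// and to a bulk revoke (biasing disabled for the type) at the second.
HeuristicsResult decide(PerTypeState& s, int64_t now_ms) {
  if (s.revocation_count >= kBulkRebiasThreshold &&
      s.revocation_count <  kBulkRevokeThreshold &&
      s.last_bulk_revocation_ms != 0 &&
      now_ms - s.last_bulk_revocation_ms >= kDecayTimeMillis) {
    s.revocation_count = 0;                // quiet period: forget the history
  }
  if (s.revocation_count <= kBulkRevokeThreshold) {
    ++s.revocation_count;                  // saturates just past the revoke threshold
  }
  if (s.revocation_count == kBulkRevokeThreshold) return HR_BULK_REVOKE;
  if (s.revocation_count == kBulkRebiasThreshold) return HR_BULK_REBIAS;
  return HR_SINGLE_REVOKE;
}

After a bulk rebias the count keeps climbing on further revocations; only a sufficiently long quiet spell resets it, otherwise the type eventually reaches the revoke threshold and biasing is disabled for it entirely.
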
Example #4
inline void MarkSweep::mark_object(oop obj) {
#if INCLUDE_ALL_GCS
  if (G1StringDedup::is_enabled()) {
    // We must enqueue the object before it is marked
    // as we otherwise can't read the object's age.
    G1StringDedup::enqueue_from_mark(obj);
  }
#endif
  // Some marks may contain information we need to preserve, so we store them
  // away and overwrite the mark. We'll restore them at the end of markSweep.
  markOop mark = obj->mark();
  obj->set_mark(markOopDesc::prototype()->set_marked());

  if (mark->must_be_preserved(obj)) {
    preserve_mark(obj, mark);
  }
}
void DefNewGeneration::handle_promotion_failure(oop old) {
  log_debug(gc, promotion)("Promotion failure size = %d", old->size());

  _promotion_failed = true;
  _promotion_failed_info.register_copy_failure(old->size());
  preserve_mark_if_necessary(old, old->mark());
  // forward to self
  old->forward_to(old);

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}
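
The "forward to self" line is what later phases key on: a forwarded object whose forwardee is the object itself is one that could not be copied and stays in place, and the collector has to clear those self-forwards again once the failed scavenge is wound down. A hedged sketch of that convention, using a hypothetical object model rather than the real oopDesc API:

// Hypothetical minimal model of forwarding; not the oopDesc interface.
struct Obj {
  Obj* forwardee = nullptr;                // set once the object has been "copied"
};

inline void forward_to(Obj* obj, Obj* target) { obj->forwardee = target; }
inline bool is_forwarded(const Obj* obj)      { return obj->forwardee != nullptr; }

// An object that failed promotion is forwarded to itself, so later code can
// tell "copied elsewhere" apart from "stayed put because allocation failed"
// with a single pointer comparison.
inline bool promotion_failed(const Obj* obj) {
  return is_forwarded(obj) && obj->forwardee == obj;
}

Because the copy could not happen, these self-forwarded objects are pushed on _promo_failure_scan_stack and drained so their references still get processed, which is exactly what the tail of the function above does.
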
Example #6
void DefNewGeneration::handle_promotion_failure(oop old) {
  if (PrintPromotionFailure && !_promotion_failed) {
    gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ",
                        old->size());
  }
  _promotion_failed = true;
  preserve_mark_if_necessary(old, old->mark());
  // forward to self
  old->forward_to(old);

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}
Example #7
inline oop PSPromotionManager::copy_to_survivor_space(oop o) {
    assert(should_scavenge(&o), "Sanity");

    oop new_obj = NULL;

    // NOTE! We must be very careful with any methods that access the mark
    // in o. There may be multiple threads racing on it, and it may be forwarded
    // at any time. Do not use oop methods for accessing the mark!
    markOop test_mark = o->mark();

    // The same test as "o->is_forwarded()"
    if (!test_mark->is_marked()) {
        bool new_obj_is_tenured = false;
        size_t new_obj_size = o->size();

        // Find the object's age, MT-safe.
        uint age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
                   test_mark->displaced_mark_helper()->age() : test_mark->age();

        if (!promote_immediately) {
            // Try allocating obj in to-space (unless too old)
            if (age < PSScavenge::tenuring_threshold()) {
                new_obj = (oop) _young_lab.allocate(new_obj_size);
                if (new_obj == NULL && !_young_gen_is_full) {
                    // Do we allocate directly, or flush and refill?
                    if (new_obj_size > (YoungPLABSize / 2)) {
                        // Allocate this object directly
                        new_obj = (oop)young_space()->cas_allocate(new_obj_size);
                        promotion_trace_event(new_obj, o, new_obj_size, age, false, NULL);
                    } else {
                        // Flush and fill
                        _young_lab.flush();

                        HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
                        if (lab_base != NULL) {
                            _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
                            // Try the young lab allocation again.
                            new_obj = (oop) _young_lab.allocate(new_obj_size);
                            promotion_trace_event(new_obj, o, new_obj_size, age, false, &_young_lab);
                        } else {
                            _young_gen_is_full = true;
                        }
                    }
                }
            }
        }

        // Otherwise try allocating obj tenured
        if (new_obj == NULL) {
#ifndef PRODUCT
            if (ParallelScavengeHeap::heap()->promotion_should_fail()) {
                return oop_promotion_failed(o, test_mark);
            }
#endif  // #ifndef PRODUCT

            new_obj = (oop) _old_lab.allocate(new_obj_size);
            new_obj_is_tenured = true;

            if (new_obj == NULL) {
                if (!_old_gen_is_full) {
                    // Do we allocate directly, or flush and refill?
                    if (new_obj_size > (OldPLABSize / 2)) {
                        // Allocate this object directly
                        new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
                        promotion_trace_event(new_obj, o, new_obj_size, age, true, NULL);
                    } else {
                        // Flush and fill
                        _old_lab.flush();

                        HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize);
                        if(lab_base != NULL) {
#ifdef ASSERT
                            // Delay the initialization of the promotion lab (plab).
                            // This exposes uninitialized plabs to card table processing.
                            if (GCWorkerDelayMillis > 0) {
                                os::sleep(Thread::current(), GCWorkerDelayMillis, false);
                            }
#endif
                            _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
                            // Try the old lab allocation again.
                            new_obj = (oop) _old_lab.allocate(new_obj_size);
                            promotion_trace_event(new_obj, o, new_obj_size, age, true, &_old_lab);
                        }
                    }
                }

                // This is the promotion-failure test and its handling code.
                // The code belongs here for two reasons: it is slightly
                // different from the code below and cannot share the CAS
                // testing code, and keeping it here minimizes the impact
                // on the common-case fast path.

                if (new_obj == NULL) {
                    _old_gen_is_full = true;
                    return oop_promotion_failed(o, test_mark);
                }
            }
        }

        assert(new_obj != NULL, "allocation should have succeeded");

        // Copy obj
        Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);

        // Now we have to CAS in the header.
        if (o->cas_forward_to(new_obj, test_mark)) {
            // We won any races, we "own" this object.
            assert(new_obj == o->forwardee(), "Sanity");

            // Increment age if obj is still in the new generation. Now that
            // we're dealing with a markOop that cannot change, it is
            // okay to use the non-MT-safe oop methods.
            if (!new_obj_is_tenured) {
                new_obj->incr_age();
                assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
            }

            // Do the size comparison first with new_obj_size, which we
            // already have. Hopefully, only a few objects are larger than
            // _min_array_size_for_chunking, and most of them will be arrays.
            // So, the is_objArray() test would be very infrequent.
            if (new_obj_size > _min_array_size_for_chunking &&
                    new_obj->is_objArray() &&
                    PSChunkLargeArrays) {
                // we'll chunk it
                oop* const masked_o = mask_chunked_array_oop(o);
                push_depth(masked_o);
                TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
            } else {
                // (The excerpt breaks off here; the rest of the function below is a
                //  reconstruction of its structure, not verbatim source.)
                // We'll just push the new copy's contents for later scanning.
                push_contents(new_obj);
            }
        } else {
            // We lost the CAS race: another worker already forwarded this object.
            // Undo the speculative copy and take the winner's forwardee instead.
            guarantee(o->is_forwarded(), "Object must be forwarded if the CAS failed");
            if (new_obj_is_tenured) {
                if (!_old_lab.unallocate_object((HeapWord*)new_obj, new_obj_size)) {
                    CollectedHeap::fill_with_object((HeapWord*)new_obj, new_obj_size);
                }
            } else if (!_young_lab.unallocate_object((HeapWord*)new_obj, new_obj_size)) {
                CollectedHeap::fill_with_object((HeapWord*)new_obj, new_obj_size);
            }
            new_obj = o->forwardee();
        }
    } else {
        // The object had already been forwarded before we read its mark.
        assert(o->is_forwarded(), "Sanity");
        new_obj = o->forwardee();
    }

    return new_obj;
}
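
Both the young and the old path above make the same buffer decision: an object larger than half a PLAB goes straight to the shared cas_allocate() so it does not waste most of a fresh buffer, anything smaller flushes and refills the thread-local lab and allocates from it bump-pointer style. The sketch below shows only that policy with made-up types and sizes; the real PSPromotionLAB/MutableSpace machinery is more involved.

#include <atomic>
#include <cstddef>
#include <cstdint>

constexpr size_t kPLABWords = 2048;        // assumed PLAB size in words, not a real default

struct SharedSpace {                       // stands in for cas_allocate() on a space
  uintptr_t* base = nullptr;
  size_t capacity = 0;
  std::atomic<size_t> top{0};
  uintptr_t* cas_allocate(size_t words) {  // multi-threaded bump allocation
    size_t old_top = top.fetch_add(words);
    return (old_top + words <= capacity) ? base + old_top : nullptr;
    // (A failed oversized bump is not undone here; good enough for a sketch.)
  }
};

struct LocalLab {                          // single-threaded bump buffer (the "lab")
  uintptr_t* cur = nullptr;
  uintptr_t* end = nullptr;
  uintptr_t* allocate(size_t words) {
    if (cur == nullptr || cur + words > end) return nullptr;
    uintptr_t* p = cur; cur += words; return p;
  }
  void initialize(uintptr_t* lab_base, size_t words) { cur = lab_base; end = lab_base + words; }
  void flush() { cur = end = nullptr; }    // the real labs also fill the unused tail
};

// Same shape as the young/old branches above: big objects bypass the lab,
// small ones trigger a flush-and-refill and retry the cheap lab allocation.
uintptr_t* promote_allocate(LocalLab& lab, SharedSpace& space, size_t obj_words) {
  if (uintptr_t* p = lab.allocate(obj_words)) return p;
  if (obj_words > kPLABWords / 2) {
    return space.cas_allocate(obj_words);  // allocate this object directly
  }
  lab.flush();
  uintptr_t* lab_base = space.cas_allocate(kPLABWords);
  if (lab_base == nullptr) return nullptr; // the space is full; caller handles failure
  lab.initialize(lab_base, kPLABWords);
  return lab.allocate(obj_words);
}

The half-PLAB cutoff acts as a waste bound: any object that is allowed to come out of a freshly refilled lab leaves at least half of the buffer usable for later objects.
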
Example #8
static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
                                                                   bool bulk_rebias,
                                                                   bool attempt_rebias_of_object,
                                                                   JavaThread* requesting_thread) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");

  if (TraceBiasedLocking) {
    tty->print_cr("* Beginning bulk revocation (kind == %s) because of object "
                  INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                  (bulk_rebias ? "rebias" : "revoke"),
                  p2i((void *) o), (intptr_t) o->mark(), o->klass()->external_name());
  }

  jlong cur_time = os::javaTimeMillis();
  o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time);


  Klass* k_o = o->klass();
  Klass* klass = k_o;

  if (bulk_rebias) {
    // Use the epoch in the klass of the object to implicitly revoke
    // all biases of objects of this data type and force them to be
    // reacquired. However, we also need to walk the stacks of all
    // threads and update the headers of lightweight locked objects
    // with biases to have the current epoch.

    // If the prototype header doesn't have the bias pattern, don't
    // try to update the epoch -- assume another VM operation came in
    // and reset the header to the unbiased state, which will
    // implicitly cause all existing biases to be revoked
    if (klass->prototype_header()->has_bias_pattern()) {
      int prev_epoch = klass->prototype_header()->bias_epoch();
      klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
      int cur_epoch = klass->prototype_header()->bias_epoch();

      // Now walk all threads' stacks and adjust epochs of any biased
      // and locked objects of this data type we encounter
      for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
        GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
        for (int i = 0; i < cached_monitor_info->length(); i++) {
          MonitorInfo* mon_info = cached_monitor_info->at(i);
          oop owner = mon_info->owner();
          markOop mark = owner->mark();
          if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
            // We might have encountered this object already in the case of recursive locking
            assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
            owner->set_mark(mark->set_bias_epoch(cur_epoch));
          }
        }
      }
    }

    // At this point we're done. All we have to do is potentially
    // adjust the header of the given object to revoke its bias.
    revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread);
  } else {
    if (TraceBiasedLocking) {
      ResourceMark rm;
      tty->print_cr("* Disabling biased locking for type %s", klass->external_name());
    }

    // Disable biased locking for this data type. Not only will this
    // cause future instances to not be biased, but existing biased
    // instances will notice that this implicitly caused their biases
    // to be revoked.
    klass->set_prototype_header(markOopDesc::prototype());

    // Now walk all threads' stacks and forcibly revoke the biases of
    // any locked and biased objects of this data type we encounter.
    for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
      GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
      for (int i = 0; i < cached_monitor_info->length(); i++) {
        MonitorInfo* mon_info = cached_monitor_info->at(i);
        oop owner = mon_info->owner();
        markOop mark = owner->mark();
        if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
          revoke_bias(owner, false, true, requesting_thread);
        }
      }
    }

    // The bias of the passed object must be forcibly revoked as well,
    // to preserve the guarantees made to callers.
    revoke_bias(o, false, true, requesting_thread);
  }

  if (TraceBiasedLocking) {
    tty->print_cr("* Ending bulk revocation");
  }

  BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;

  if (attempt_rebias_of_object &&
      o->mark()->has_bias_pattern() &&
      klass->prototype_header()->has_bias_pattern()) {
    markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
                                           klass->prototype_header()->bias_epoch());
    o->set_mark(new_mark);
    status_code = BiasedLocking::BIAS_REVOKED_AND_REBIASED;
    if (TraceBiasedLocking) {
      tty->print_cr("  Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
    }
  }

  assert(!o->mark()->has_bias_pattern() ||
         (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
         "bug in bulk bias revocation");

  return status_code;
}
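
The bulk-rebias branch leans entirely on the epoch field: a bias stored in an object header only counts while its epoch matches the current epoch in the class's prototype header, so bumping the class epoch invalidates every outstanding (unlocked) bias of that type at once without visiting the objects. A toy model of that check follows; the bit widths and types are invented, only the idea is taken from the code above.

// Invented layout: a 2-bit epoch plus a bias flag -- not the real markOop encoding.
struct ToyMark {
  unsigned epoch  : 2;
  unsigned biased : 1;
};

struct ToyKlass {
  unsigned prototype_epoch = 0;            // epoch a valid bias must carry
  void bulk_rebias() { prototype_epoch = (prototype_epoch + 1) & 0x3; }
};

// A bias is usable only if it is present AND its epoch is current. After
// bulk_rebias() every previously biased, unlocked instance fails this test,
// and the next thread to lock such an object simply re-biases it to itself.
inline bool bias_is_valid(const ToyMark& m, const ToyKlass& k) {
  return m.biased && m.epoch == k.prototype_epoch;
}

The stack walk in the code above exists for the one case this trick cannot cover: objects that are biased and currently locked must keep a valid bias for their owner, so their headers are updated to the new epoch in place.
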
Example #9
static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread) {
  markOop mark = obj->mark();
  if (!mark->has_bias_pattern()) {
    if (TraceBiasedLocking) {
      ResourceMark rm;
      tty->print_cr("  (Skipping revocation of object of type %s because it's no longer biased)",
                    obj->klass()->external_name());
    }
    return BiasedLocking::NOT_BIASED;
  }

  uint age = mark->age();
  markOop   biased_prototype = markOopDesc::biased_locking_prototype()->set_age(age);
  markOop unbiased_prototype = markOopDesc::prototype()->set_age(age);

  if (TraceBiasedLocking && (Verbose || !is_bulk)) {
    ResourceMark rm;
    tty->print_cr("Revoking bias of object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT " , allow rebias %d , requesting thread " INTPTR_FORMAT,
                  p2i((void *)obj), (intptr_t) mark, obj->klass()->external_name(), (intptr_t) obj->klass()->prototype_header(), (allow_rebias ? 1 : 0), (intptr_t) requesting_thread);
  }

  JavaThread* biased_thread = mark->biased_locker();
  if (biased_thread == NULL) {
    // Object is anonymously biased. We can get here if, for
    // example, we revoke the bias due to an identity hash code
    // being computed for an object.
    if (!allow_rebias) {
      obj->set_mark(unbiased_prototype);
    }
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr("  Revoked bias of anonymously-biased object");
    }
    return BiasedLocking::BIAS_REVOKED;
  }

  // Handle case where the thread toward which the object was biased has exited
  bool thread_is_alive = false;
  if (requesting_thread == biased_thread) {
    thread_is_alive = true;
  } else {
    for (JavaThread* cur_thread = Threads::first(); cur_thread != NULL; cur_thread = cur_thread->next()) {
      if (cur_thread == biased_thread) {
        thread_is_alive = true;
        break;
      }
    }
  }
  if (!thread_is_alive) {
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      obj->set_mark(unbiased_prototype);
    }
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr("  Revoked bias of object biased toward dead thread");
    }
    return BiasedLocking::BIAS_REVOKED;
  }

  // Thread owning bias is alive.
  // Check to see whether it currently owns the lock and, if so,
  // write down the needed displaced headers to the thread's stack.
  // Otherwise, restore the object's header either to the unlocked
  // or unbiased state.
  GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(biased_thread);
  BasicLock* highest_lock = NULL;
  for (int i = 0; i < cached_monitor_info->length(); i++) {
    MonitorInfo* mon_info = cached_monitor_info->at(i);
    if (mon_info->owner() == obj) {
      if (TraceBiasedLocking && Verbose) {
        tty->print_cr("   mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")",
                      p2i((void *) mon_info->owner()),
                      p2i((void *) obj));
      }
      // Assume recursive case and fix up highest lock later
      markOop mark = markOopDesc::encode((BasicLock*) NULL);
      highest_lock = mon_info->lock();
      highest_lock->set_displaced_header(mark);
    } else {
      if (TraceBiasedLocking && Verbose) {
        tty->print_cr("   mon_info->owner (" PTR_FORMAT ") != obj (" PTR_FORMAT ")",
                      p2i((void *) mon_info->owner()),
                      p2i((void *) obj));
      }
    }
  }
  if (highest_lock != NULL) {
    // Fix up highest lock to contain displaced header and point
    // object at it
    highest_lock->set_displaced_header(unbiased_prototype);
    // Reset object header to point to displaced mark.
    // Must release storing the lock address for platforms without TSO
    // ordering (e.g. ppc).
    obj->release_set_mark(markOopDesc::encode(highest_lock));
    assert(!obj->mark()->has_bias_pattern(), "illegal mark state: stack lock used bias bit");
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr("  Revoked bias of currently-locked object");
    }
  } else {
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr("  Revoked bias of currently-unlocked object");
    }
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      // Store the unlocked value into the object's header.
      obj->set_mark(unbiased_prototype);
    }
  }

  return BiasedLocking::BIAS_REVOKED;
}
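
The loop that stores NULL displaced headers and then patches highest_lock afterwards reproduces HotSpot's stack-locking convention for recursive locks: only the oldest lock record on the owner's stack holds the real displaced mark word, every inner (recursive) record holds NULL. A compressed sketch of just that fix-up, with a hypothetical lock-record type:

#include <cstdint>
#include <vector>

// Hypothetical lock-record model, not the real BasicLock/BasicObjectLock.
struct LockRecord {
  uintptr_t displaced_mark = 0;            // 0 means "recursive entry"
};

// Convert a bias held by a live, currently-locking thread into ordinary stack
// locks: every record is first treated as recursive (NULL displaced header),
// then the oldest record gets the real unbiased mark, exactly as the
// highest_lock fix-up above does before the object header is re-pointed
// (with a release store) at that oldest record.
void bias_to_stack_locks(std::vector<LockRecord*>& records_young_to_old,
                         uintptr_t unbiased_mark) {
  LockRecord* oldest = nullptr;
  for (LockRecord* rec : records_young_to_old) {
    rec->displaced_mark = 0;               // assume recursive; fix the last one up below
    oldest = rec;                          // the last match is the highest/oldest lock
  }
  if (oldest != nullptr) {
    oldest->displaced_mark = unbiased_mark;
  }
}
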