static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
                                                                   bool bulk_rebias,
                                                                   bool attempt_rebias_of_object,
                                                                   JavaThread* requesting_thread) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");

  if (TraceBiasedLocking) {
    tty->print_cr("* Beginning bulk revocation (kind == %s) because of object "
                  INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                  (bulk_rebias ? "rebias" : "revoke"),
                  (void *) o, (intptr_t) o->mark(), o->klass()->external_name());
  }

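  // Record when the bulk revocation happened; the bulk-revocation heuristics
  // use this per-klass timestamp to decay the revocation count for this type
  // (see BiasedLockingDecayTime).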
  jlong cur_time = os::javaTimeMillis();
  o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time);

  Klass* k_o = o->klass();
  Klass* klass = k_o;

  if (bulk_rebias) {
    // Use the epoch in the klass of the object to implicitly revoke
    // all biases of objects of this data type and force them to be
    // reacquired. However, we also need to walk the stacks of all
    // threads and update the headers of lightweight locked objects
    // with biases to have the current epoch.

    // If the prototype header doesn't have the bias pattern, don't
    // try to update the epoch -- assume another VM operation came in
    // and reset the header to the unbiased state, which will
    // implicitly cause all existing biases to be revoked
    if (klass->prototype_header()->has_bias_pattern()) {
      int prev_epoch = klass->prototype_header()->bias_epoch();
      klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
      int cur_epoch = klass->prototype_header()->bias_epoch();

      // Now walk all threads' stacks and adjust epochs of any biased
      // and locked objects of this data type we encounter
      for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
        GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
        for (int i = 0; i < cached_monitor_info->length(); i++) {
          MonitorInfo* mon_info = cached_monitor_info->at(i);
          oop owner = mon_info->owner();
          markOop mark = owner->mark();
          if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
            // We might have encountered this object already in the case of recursive locking
            assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
            owner->set_mark(mark->set_bias_epoch(cur_epoch));
          }
        }
      }
    }

    // At this point we're done. All we have to do is potentially
    // adjust the header of the given object to revoke its bias.
    revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread);
  } else {
    if (TraceBiasedLocking) {
      ResourceMark rm;
      tty->print_cr("* Disabling biased locking for type %s", klass->external_name());
    }

    // Disable biased locking for this data type. Not only will this
    // cause future instances to not be biased, but existing biased
    // instances will notice that this implicitly caused their biases
    // to be revoked.
    klass->set_prototype_header(markOopDesc::prototype());

    // Now walk all threads' stacks and forcibly revoke the biases of
    // any locked and biased objects of this data type we encounter.
    for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
      GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
      for (int i = 0; i < cached_monitor_info->length(); i++) {
        MonitorInfo* mon_info = cached_monitor_info->at(i);
        oop owner = mon_info->owner();
        markOop mark = owner->mark();
        if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
          revoke_bias(owner, false, true, requesting_thread);
        }
      }
    }

    // Must force the bias of the passed object to be forcibly revoked
    // as well to ensure guarantees to callers
    revoke_bias(o, false, true, requesting_thread);
  }

  if (TraceBiasedLocking) {
    tty->print_cr("* Ending bulk revocation");
  }

  BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;

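  // If the caller asked for it and biasing is still enabled for this type,
  // immediately rebias the triggering object toward the requesting thread so
  // its pending lock acquisition stays cheap.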
  if (attempt_rebias_of_object &&
      o->mark()->has_bias_pattern() &&
      klass->prototype_header()->has_bias_pattern()) {
    markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
                                           klass->prototype_header()->bias_epoch());
    o->set_mark(new_mark);
    status_code = BiasedLocking::BIAS_REVOKED_AND_REBIASED;
    if (TraceBiasedLocking) {
      tty->print_cr("  Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
    }
  }

  assert(!o->mark()->has_bias_pattern() ||
         (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
         "bug in bulk bias revocation");

  return status_code;
}
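
// ---------------------------------------------------------------------------
// Illustrative, self-contained sketch (not HotSpot code): a minimal model of
// why bumping the bias epoch stored in the klass implicitly revokes the biases
// of all existing instances without touching each object. All names here
// (FakeKlass, FakeOop, is_bias_current) are hypothetical.
// ---------------------------------------------------------------------------
#include <cstdio>

struct FakeKlass {
  int bias_epoch = 0;          // stands in for the epoch in the prototype header
};

struct FakeOop {
  FakeKlass* klass;
  int bias_epoch;              // epoch snapshot taken when the bias was installed
  long biased_thread_id;       // 0 means not biased toward any thread
};

// A bias is only honored while the object's epoch matches its klass's epoch.
static bool is_bias_current(const FakeOop& o) {
  return o.biased_thread_id != 0 && o.bias_epoch == o.klass->bias_epoch;
}

int main() {
  FakeKlass k;
  FakeOop a{&k, k.bias_epoch, /* thread id */ 42};

  std::printf("before epoch bump: bias valid = %d\n", is_bias_current(a));
  // Bulk rebias: a single write to the klass. Only objects that are currently
  // lightweight-locked need their headers fixed up (the stack walk above).
  k.bias_epoch++;
  std::printf("after epoch bump:  bias valid = %d\n", is_bias_current(a));
  return 0;
}
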
static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread) {
  markOop mark = obj->mark();
  if (!mark->has_bias_pattern()) {
    if (TraceBiasedLocking) {
      ResourceMark rm;
      tty->print_cr("  (Skipping revocation of object of type %s because it's no longer biased)",
                    Klass::cast(obj->klass())->external_name());
    }
    return BiasedLocking::NOT_BIASED;
  }

  int age = mark->age();
  markOop   biased_prototype = markOopDesc::biased_locking_prototype()->set_age(age);
  markOop unbiased_prototype = markOopDesc::prototype()->set_age(age);

  if (TraceBiasedLocking && (Verbose || !is_bulk)) {
    ResourceMark rm;
    tty->print_cr("Revoking bias of object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT " , allow rebias %d , requesting thread " INTPTR_FORMAT,
                  (intptr_t) obj, (intptr_t) mark, Klass::cast(obj->klass())->external_name(), (intptr_t) Klass::cast(obj->klass())->prototype_header(), (allow_rebias ? 1 : 0), (intptr_t) requesting_thread);
  }

  JavaThread* biased_thread = mark->biased_locker();
  if (biased_thread == NULL) {
    // Object is anonymously biased. We can get here if, for
    // example, we revoke the bias due to an identity hash code
    // being computed for an object.
    if (!allow_rebias) {
      obj->set_mark(unbiased_prototype);
    }
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr("  Revoked bias of anonymously-biased object");
    }
    return BiasedLocking::BIAS_REVOKED;
  }

  // Handle case where the thread toward which the object was biased has exited
  bool thread_is_alive = false;
  if (requesting_thread == biased_thread) {
    thread_is_alive = true;
  } else {
    for (JavaThread* cur_thread = Threads::first(); cur_thread != NULL; cur_thread = cur_thread->next()) {
      if (cur_thread == biased_thread) {
        thread_is_alive = true;
        break;
      }
    }
  }
  if (!thread_is_alive) {
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      obj->set_mark(unbiased_prototype);
    }
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr("  Revoked bias of object biased toward dead thread");
    }
    return BiasedLocking::BIAS_REVOKED;
  }

  // Thread owning bias is alive.
  // Check to see whether it currently owns the lock and, if so,
  // write down the needed displaced headers to the thread's stack.
  // Otherwise, restore the object's header either to the unlocked
  // or unbiased state.
  GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(biased_thread);
  BasicLock* highest_lock = NULL;
  for (int i = 0; i < cached_monitor_info->length(); i++) {
    MonitorInfo* mon_info = cached_monitor_info->at(i);
    if (mon_info->owner() == obj) {
      if (TraceBiasedLocking && Verbose) {
        tty->print_cr("   mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")",
                      (intptr_t) mon_info->owner(),
                      (intptr_t) obj);
      }
      // Assume recursive case and fix up highest lock later
      markOop mark = markOopDesc::encode((BasicLock*) NULL);
      highest_lock = mon_info->lock();
      highest_lock->set_displaced_header(mark);
    } else {
      if (TraceBiasedLocking && Verbose) {
        tty->print_cr("   mon_info->owner (" PTR_FORMAT ") != obj (" PTR_FORMAT ")",
                      (intptr_t) mon_info->owner(),
                      (intptr_t) obj);
      }
    }
  }
  if (highest_lock != NULL) {
    // Fix up highest lock to contain displaced header and point
    // object at it
    highest_lock->set_displaced_header(unbiased_prototype);
    // Reset object header to point to displaced mark
    obj->set_mark(markOopDesc::encode(highest_lock));
    assert(!obj->mark()->has_bias_pattern(), "illegal mark state: stack lock used bias bit");
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr("  Revoked bias of currently-locked object");
    }
  } else {
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr("  Revoked bias of currently-unlocked object");
    }
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      // Store the unlocked value into the object's header.
      obj->set_mark(unbiased_prototype);
    }
  }

  return BiasedLocking::BIAS_REVOKED;
}
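
// ---------------------------------------------------------------------------
// Illustrative sketch (not HotSpot code): the three header outcomes computed
// by revoke_bias(), using the documented low-order mark word bits
// (0b101 = biasable/biased, 0b001 = neutral/unlocked, low bits 00 = pointer to
// a BasicLock on the owner's stack). The real code additionally preserves the
// age bits; new_header() below is a hypothetical helper.
// ---------------------------------------------------------------------------
#include <cstdint>
#include <cstdio>

constexpr std::uintptr_t kBiasedPrototype   = 0x5;  // biasable, no owner
constexpr std::uintptr_t kUnlockedPrototype = 0x1;  // plain unlocked header

std::uintptr_t new_header(bool owner_holds_lock, bool allow_rebias,
                          std::uintptr_t highest_lock_addr) {
  if (owner_holds_lock) {
    return highest_lock_addr;                 // low bits 00: stack-locked
  }
  return allow_rebias ? kBiasedPrototype      // may be biased again later
                      : kUnlockedPrototype;   // bias permanently revoked
}

int main() {
  std::printf("owner still holds lock: 0x%llx\n",
              (unsigned long long) new_header(true,  false, 0x1000));
  std::printf("rebias allowed:         0x%llx\n",
              (unsigned long long) new_header(false, true,  0));
  std::printf("rebias not allowed:     0x%llx\n",
              (unsigned long long) new_header(false, false, 0));
  return 0;
}
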
oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
                                                 oop const old,
                                                 markOop const old_mark) {
    const size_t word_sz = old->size();
    HeapRegion* const from_region = _g1h->heap_region_containing(old);
    // +1 to make the -1 indexes valid...
    const int young_index = from_region->young_index_in_cset()+1;
    assert( (from_region->is_young() && young_index >  0) ||
            (!from_region->is_young() && young_index == 0), "invariant" );
    const AllocationContext_t context = from_region->allocation_context();

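    // next_state() reads the object's age from the mark word (or its displaced
    // mark) and, together with the tenuring threshold, picks the destination:
    // stay in young/survivor space or be promoted to old gen.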
    uint age = 0;
    InCSetState dest_state = next_state(state, old_mark, age);
    // The second clause is to prevent premature evacuation failure in case there
    // is still space in survivor, but old gen is full.
    if (_old_gen_is_full && dest_state.is_old()) {
        return handle_evacuation_failure_par(old, old_mark);
    }
    HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_state, word_sz, context);

    // PLAB allocations should succeed most of the time, so we'll
    // normally check against NULL once and that's it.
    if (obj_ptr == NULL) {
        bool plab_refill_failed = false;
        obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_state, word_sz, context, &plab_refill_failed);
        if (obj_ptr == NULL) {
            obj_ptr = allocate_in_next_plab(state, &dest_state, word_sz, context, plab_refill_failed);
            if (obj_ptr == NULL) {
                // This will either forward-to-self, or detect that someone else has
                // installed a forwarding pointer.
                return handle_evacuation_failure_par(old, old_mark);
            }
        }
        if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
            // The events are checked individually as part of the actual commit
            report_promotion_event(dest_state, old, word_sz, age, obj_ptr, context);
        }
    }

    assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
    assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");

#ifndef PRODUCT
    // Should this evacuation fail?
    if (_g1h->evacuation_should_fail()) {
        // Doing this after all the allocation attempts also tests the
        // undo_allocation() method too.
        _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
        return handle_evacuation_failure_par(old, old_mark);
    }
#endif // !PRODUCT

    // We're going to allocate linearly, so might as well prefetch ahead.
    Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);

    const oop obj = oop(obj_ptr);
    const oop forward_ptr = old->forward_to_atomic(obj);
    if (forward_ptr == NULL) {
        Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);

        if (dest_state.is_young()) {
            if (age < markOopDesc::max_age) {
                age++;
            }
            if (old_mark->has_displaced_mark_helper()) {
                // In this case, we have to install the mark word first,
                // otherwise obj looks to be forwarded (the old mark word,
                // which contains the forward pointer, was copied)
                obj->set_mark(old_mark);
                markOop new_mark = old_mark->displaced_mark_helper()->set_age(age);
                old_mark->set_displaced_mark_helper(new_mark);
            } else {
                obj->set_mark(old_mark->set_age(age));
            }
            _age_table.add(age, word_sz);
        } else {
            obj->set_mark(old_mark);
        }

        if (G1StringDedup::is_enabled()) {
            const bool is_from_young = state.is_young();
            const bool is_to_young = dest_state.is_young();
            assert(is_from_young == _g1h->heap_region_containing(old)->is_young(),
                   "sanity");
            assert(is_to_young == _g1h->heap_region_containing(obj)->is_young(),
                   "sanity");
            G1StringDedup::enqueue_from_evacuation(is_from_young,
                                                   is_to_young,
                                                   _worker_id,
                                                   obj);
        }

        _surviving_young_words[young_index] += word_sz;

        if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
            // We keep track of the next start index in the length field of
            // the to-space object. The actual length can be found in the
            // length field of the from-space object.
            arrayOop(obj)->set_length(0);
            oop* old_p = set_partial_array_mask(old);
            push_on_queue(old_p);
        } else {
            HeapRegion* const to_region = _g1h->heap_region_containing(obj_ptr);
            _scanner.set_region(to_region);
            obj->oop_iterate_backwards(&_scanner);
        }
        return obj;
    } else {
        _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
        return forward_ptr;
    }
}
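
// ---------------------------------------------------------------------------
// Illustrative sketch (not G1 code): the claim-by-CAS pattern behind
// forward_to_atomic(). Several GC workers may copy the same object; only the
// first CAS on the forwarding slot wins, the losers discard their copy and use
// the winner's, mirroring the undo_allocation() branch above. ForwardSlot and
// try_claim() are hypothetical names.
// ---------------------------------------------------------------------------
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

struct ForwardSlot {
  std::atomic<void*> forwardee{nullptr};

  // Returns nullptr if our copy was installed (we won the race), otherwise the
  // copy installed by whichever worker won.
  void* try_claim(void* my_copy) {
    void* expected = nullptr;
    if (forwardee.compare_exchange_strong(expected, my_copy)) {
      return nullptr;
    }
    return expected;
  }
};

int main() {
  ForwardSlot slot;
  std::atomic<int> wins{0};
  std::vector<std::thread> workers;
  for (int i = 0; i < 4; i++) {
    workers.emplace_back([&, i] {
      int* my_copy = new int(i);              // stands in for the PLAB copy
      if (slot.try_claim(my_copy) == nullptr) {
        wins++;                               // winner keeps its copy
      } else {
        delete my_copy;                       // loser: "undo_allocation"
      }
    });
  }
  for (auto& t : workers) t.join();
  std::printf("copies installed: %d (always exactly 1)\n", wins.load());
  delete static_cast<int*>(slot.forwardee.load());
  return 0;
}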