void CMSLockVerifier::assert_locked(const Mutex* lock, const Mutex* p_lock) {
  if (ParallelGCThreads == 0) {
    assert_lock_strong(lock);
  } else {
    Thread* myThread = Thread::current();
    if (myThread->is_VM_thread()
        || myThread->is_ConcurrentMarkSweep_thread()
        || myThread->is_Java_thread()) {
      // Make sure that we are holding the free list lock.
      assert_lock_strong(lock);
      // The checking of p_lock is a special case for CFLS'
      // (CompactibleFreeListSpace's) free list locks: we make sure that none
      // of the parallel GC work gang threads are holding "sub-locks" of
      // freeListLock(). We check only the parDictionaryAllocLock because the
      // others are too numerous. This special-case code is somewhat ugly and
      // any improvements are welcome XXX FIX ME!!
      if (p_lock != NULL) {
        assert(!p_lock->is_locked() || p_lock->owned_by_self(),
               "Possible race between this and parallel GC threads");
      }
    } else if (myThread->is_GC_task_thread()) {
      // Make sure that the VM or CMS thread holds lock on our behalf
      // XXX If there were a concept of a gang_master for a (set of)
      // gang_workers, we could have used the identity of that thread
      // for checking ownership here; for now we just disjunct.
      assert(lock->owner() == VMThread::vm_thread() ||
             lock->owner() == ConcurrentMarkSweepThread::first_thread(),
             "Should be locked by VM thread or CMS thread on my behalf");
    } else {
      // Make sure we didn't miss some obscure corner case
      ShouldNotReachHere();
    }
  }
}
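All of these examples revolve around assert_lock_strong, the strong form of HotSpot's debug-only lock assertions: it requires that the calling thread itself owns the mutex, not merely that some thread holds it. Below is a minimal sketch of the assumed semantics, built on the owned_by_self() query seen above; the real macro lives in HotSpot's mutex code and may differ in detail.

#ifdef ASSERT
  // Minimal sketch, not the real HotSpot definition: the strong form insists
  // that the current thread is the owner. By contrast, the
  // assert_locked_or_safepoint() used in later examples is assumed to also
  // accept running inside a safepoint, where no mutator can race with us.
  #define assert_lock_strong(lock) \
    assert((lock)->owned_by_self(), "must be locked by the current thread")
#else
  #define assert_lock_strong(lock)
#endif // ASSERT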
Example #2
 ~MonitorLockerEx() {
   #ifdef ASSERT
     if (_monitor != NULL) {
       assert_lock_strong(_monitor);
     }
   #endif  // ASSERT
   // Superclass destructor will do unlocking
 }
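The destructor above only re-checks ownership; the actual unlocking happens in the superclass destructor (MutexLockerEx in older HotSpot). Here is a hypothetical usage sketch of this RAII pattern, assuming the usual constructor that acquires the monitor on entry; work_available() and take_work() are made-up placeholders.

void drain_queue_example(Monitor* queue_lock) {
  MonitorLockerEx ml(queue_lock);   // constructor locks the monitor
  while (!work_available()) {
    ml.wait();                      // atomically releases and re-acquires it
  }
  take_work();
}                                   // ~MonitorLockerEx(): assert we still own the
                                    // monitor, then the superclass destructor unlocks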
Example #3
void PSOldGen::expand_to_reserved() {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);
  size_t remaining_bytes = _virtual_space.uncommitted_size();
  if (remaining_bytes > 0) {
    bool success = expand_by(remaining_bytes);
    assert(success, "grow to reserved failed");
  }
}
Example #4
void PSOldGen::shrink(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);
  size_t size = ReservedSpace::page_align_size_down(bytes);
  if (size > 0) {
    assert_lock_strong(ExpandHeap_lock);
    _virtual_space.shrink_by(bytes);
    post_resize();

    if (Verbose && PrintGC) {
      size_t new_mem_size = _virtual_space.committed_size();
      size_t old_mem_size = new_mem_size - bytes;
      gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by " 
                                         SIZE_FORMAT "K to " 
                                         SIZE_FORMAT "K",
                      name(), old_mem_size/K, bytes/K, new_mem_size/K);
    }
  }
}
Example #5
// Make checks on the current sizes of the generations and
// the constraints on the sizes of the generations.  Push
// up the boundary within the constraints.  A partial
// push can occur.
void AdjoiningGenerations::request_old_gen_expansion(size_t expand_in_bytes) {
  assert(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary, "runtime check");

  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  // These sizes limit the amount the boundaries can move.  Effectively,
  // the generation says how much it is willing to yield to the other
  // generation.
  const size_t young_gen_available = young_gen()->available_for_contraction();
  const size_t old_gen_available = old_gen()->available_for_expansion();
  const size_t alignment = virtual_spaces()->alignment();
  size_t change_in_bytes = MIN3(young_gen_available,
                                old_gen_available,
                                align_size_up_(expand_in_bytes, alignment));

  if (change_in_bytes == 0) {
    return;
  }

  if (TraceAdaptiveGCBoundary) {
    gclog_or_tty->print_cr("Before expansion of old gen with boundary move");
    gclog_or_tty->print_cr("  Requested change: " SIZE_FORMAT_HEX
                           "  Attempted change: " SIZE_FORMAT_HEX,
      expand_in_bytes, change_in_bytes);
    if (!PrintHeapAtGC) {
      Universe::print_on(gclog_or_tty);
    }
    gclog_or_tty->print_cr("  PSOldGen max size: " SIZE_FORMAT "K",
      old_gen()->max_gen_size()/K);
  }

  // Move the boundary between the generations up (smaller young gen).
  if (virtual_spaces()->adjust_boundary_up(change_in_bytes)) {
    young_gen()->reset_after_change();
    old_gen()->reset_after_change();
  }

  // The total reserved for the generations should match the sum
  // of the two even if the boundary is moving.
  assert(reserved_byte_size() ==
         old_gen()->max_gen_size() + young_gen()->max_size(),
         "Space is missing");
  young_gen()->space_invariants();
  old_gen()->space_invariants();

  if (TraceAdaptiveGCBoundary) {
    gclog_or_tty->print_cr("After expansion of old gen with boundary move");
    if (!PrintHeapAtGC) {
      Universe::print_on(gclog_or_tty);
    }
    gclog_or_tty->print_cr("  PSOldGen max size: " SIZE_FORMAT "K",
      old_gen()->max_gen_size()/K);
  }
}
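To make the MIN3 clamp above concrete, here is a worked example with hypothetical sizes: the attempted boundary move is the smallest of what the young gen will give up, what the old gen can absorb, and the aligned request.

// Hypothetical numbers, only to illustrate how change_in_bytes is derived.
const size_t alignment           = 64 * K;
const size_t young_gen_available = 8 * 1024 * K;    // young gen can shrink by 8 MB
const size_t old_gen_available   = 32 * 1024 * K;   // old gen can still grow by 32 MB
const size_t expand_in_bytes     = 5 * 1024 * K;    // caller asked for 5 MB
size_t change_in_bytes = MIN3(young_gen_available,
                              old_gen_available,
                              align_size_up_(expand_in_bytes, alignment));
// change_in_bytes == 5 MB: the boundary moves up by that amount, shrinking the
// young gen and growing the old gen; a larger request would have been capped
// at young_gen_available (8 MB).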
Example #6
//--- DerivedPointerTable::add -----------------------------------------------
// Called during scavenge/GC
void DerivedPointerTable::add(objectRef* base, objectRef* derived) {
  assert0( !UseGenPauselessGC );

  assert_lock_strong(DerivedPointerTableGC_lock);
  assert0( _active );
  _base_derived_pairs->push((intptr_t)base);
  _base_derived_pairs->push((intptr_t)derived);
  intptr_t offset = (*derived).raw_value() - (*base).raw_value();
  assert(offset >= -1000000, "wrong derived pointer info");
  _base_derived_pairs->push(offset);
}
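Each add() call records the base slot, the derived slot, and the byte offset between the two values, so that once the collector has relocated the base object the interior (derived) pointer can be recomputed. Below is a hedged sketch of that replay step, popping the triples in reverse push order; set_raw_value() and the function name are assumptions, not the real Azul/HotSpot API.

// Illustrative sketch only: consume the triples pushed by DerivedPointerTable::add
// after the collector has updated every base pointer to its new location.
static void replay_derived_pointers(GrowableArray<intptr_t>* base_derived_pairs) {
  while (base_derived_pairs->length() > 0) {
    intptr_t   offset      = base_derived_pairs->pop();
    objectRef* derived_loc = (objectRef*) base_derived_pairs->pop();
    objectRef* base_loc    = (objectRef*) base_derived_pairs->pop();
    // *base_loc now refers to the moved object; re-derive the interior pointer
    // from the new base plus the saved offset (assumed setter).
    derived_loc->set_raw_value(base_loc->raw_value() + offset);
  }
}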
Example #7
void FreeBlockDictionary::verify_par_locked() const {
#ifdef ASSERT
  if (ParallelGCThreads > 0) {
    Thread* myThread = Thread::current();
    if (myThread->is_GC_task_thread()) {
      assert(par_lock() != NULL, "Should be using locking?");
      assert_lock_strong(par_lock());
    }
  }
#endif // ASSERT
}
Example #8
 bool check_seen_thread(Thread* thread, PlaceholderTable::classloadAction action) {
   assert_lock_strong(SystemDictionary_lock);
   SeenThread* threadQ = actionToQueue(action);
   SeenThread* seen = threadQ;
   while (seen) {
     if (thread == seen->thread()) {
       return true;
     }
     seen = seen->next();
   }
   return false;
 }
Example #9
// Doubly-linked list of Threads per action for class/classloader pair
// Class circularity support: links in thread before loading superclass
// bootstrapsearchpath support: links in a thread before load_instance_class
// definers: use as queue of define requestors, including owner of
// define token. Appends for debugging of requestor order
  void add_seen_thread(Thread* thread, PlaceholderTable::classloadAction action) {
    assert_lock_strong(SystemDictionary_lock);
    SeenThread* threadEntry = new SeenThread(thread);
    SeenThread* seen = actionToQueue(action);

    if (seen == NULL) {
      set_threadQ(threadEntry, action);
      return;
    }
    SeenThread* next;
    while ((next = seen->next()) != NULL) {
      seen = next;
    }
    seen->set_next(threadEntry);
    threadEntry->set_prev(seen);
    return;
  }
Example #10
//
// Add an nmethod to the dependency context.
// It's possible that an nmethod has multiple dependencies on a klass
// so a count is kept for each bucket to guarantee that creation and
// deletion of dependencies is consistent.
//
void DependencyContext::add_dependent_nmethod(nmethod* nm, bool expunge) {
  assert_lock_strong(CodeCache_lock);
  for (nmethodBucket* b = dependencies(); b != NULL; b = b->next()) {
    if (nm == b->get_nmethod()) {
      b->increment();
      return;
    }
  }
  set_dependencies(new nmethodBucket(nm, dependencies()));
  if (UsePerfData) {
    _perf_total_buckets_allocated_count->inc();
  }
  if (expunge) {
    // Remove stale entries from the list.
    expunge_stale_entries();
  }
}
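The per-bucket count added above only pays off on the removal side: a dependency is dropped (or marked stale) only once its count reaches zero. Here is a hypothetical sketch of that counterpart, assuming nmethodBucket also exposes a decrement() that returns the new count; the real DependencyContext has its own removal path.

// Hypothetical sketch mirroring add_dependent_nmethod: find the bucket for nm,
// decrement its count, and only treat the dependency as stale when it hits zero.
void DependencyContext::remove_dependent_nmethod_sketch(nmethod* nm, bool expunge) {
  assert_lock_strong(CodeCache_lock);
  for (nmethodBucket* b = dependencies(); b != NULL; b = b->next()) {
    if (nm == b->get_nmethod()) {
      if (b->decrement() == 0 && expunge) {
        // Bucket is now stale; unlink it together with any other dead entries.
        expunge_stale_entries();
      }
      return;
    }
  }
}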
Example #11
 // returns true if seenthreadQ is now empty
 // Note, caller must ensure probe still exists while holding
 // SystemDictionary_lock
 // ignores if cleanup has already been done
 // if found, deletes SeenThread
 bool remove_seen_thread(Thread* thread, PlaceholderTable::classloadAction action) {
   assert_lock_strong(SystemDictionary_lock);
   SeenThread* threadQ = actionToQueue(action);
   SeenThread* seen = threadQ;
   SeenThread* prev = NULL;
   while (seen) {
     if (thread == seen->thread()) {
       if (prev) {
         prev->set_next(seen->next());
       } else {
         set_threadQ(seen->next(), action);
       }
       if (seen->next()) {
         seen->next()->set_prev(prev);
       }
       delete seen;
       break;
     }
     prev = seen;
     seen = seen->next();
   }
   return (actionToQueue(action) == NULL);
 }
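The check_seen_thread, add_seen_thread, and remove_seen_thread examples above together maintain the per-action SeenThread queues that the placeholder table uses for class-circularity detection. Below is a hypothetical sketch of how a superclass-load path is assumed to use them, everything under SystemDictionary_lock; the wrapper function is made up, probe is assumed to be the placeholder-table entry owning these queues, and LOAD_SUPER is assumed to be the classloadAction for superclass loading.

// Hypothetical usage sketch: detect circularity, otherwise queue this thread
// for the duration of the superclass load and dequeue it afterwards.
bool try_load_super_guarded(PlaceholderEntry* probe, Thread* thread) {
  assert_lock_strong(SystemDictionary_lock);
  if (probe->check_seen_thread(thread, PlaceholderTable::LOAD_SUPER)) {
    return false;   // this thread is already loading the super: circularity
  }
  probe->add_seen_thread(thread, PlaceholderTable::LOAD_SUPER);
  // ... release SystemDictionary_lock, load the superclass, re-acquire it ...
  probe->remove_seen_thread(thread, PlaceholderTable::LOAD_SUPER);
  return true;
}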
Example #12
bool PSOldGen::expand_by(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);
  bool result = _virtual_space.expand_by(bytes);
  if (result) {
    post_resize();
    if (UsePerfData) {
      _space_counters->update_capacity();
      _gen_counters->update_all();
    }
  }

  if (result && Verbose && PrintGC) {
    size_t new_mem_size = _virtual_space.committed_size();
    size_t old_mem_size = new_mem_size - bytes;
    gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by " 
                                       SIZE_FORMAT "K to " 
                                       SIZE_FORMAT "K",
                    name(), old_mem_size/K, bytes/K, new_mem_size/K);
  }

  return result;
}
Example #13
 static void clear_needs_gc() {
   assert_lock_strong(JNICritical_lock);
   _needs_gc = false;
 }
Example #14
bool SuspendibleThreadSet::is_synchronized() {
  assert_lock_strong(STS_lock);
  assert(_nthreads_stopped <= _nthreads, "invariant");
  return _nthreads_stopped == _nthreads;
}
inline void CMSBitMap::clearAll() {
  assert_lock_strong(&_lock);
  _bm.clear();
  return;
}
inline void CMSBitMap::mark(HeapWord* addr) {
  assert_lock_strong(lock());
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  _bm.set_bit(heapWordToOffset(addr));
}